diff --git a/NOTES b/NOTES index 49d372099..6b8e088ea 100644 --- a/NOTES +++ b/NOTES @@ -1,10 +1,10 @@ - DB Profile + Index Column ┌───────────▼────────────────────────────┐ │0000000000000000000000000000000000000000│ │0000000000000000000000000000000000000000│ │0000000000000000000000000000000000000000│ -Bitmap──▶0000000000000000000000000000000000000000│ + Row──▶0000000000000000000000000000000000000000│ │0000000000000000000000000000000000000000│ │────────────────────────────────────────┤ │0000000000000000000000000000000000000000│ diff --git a/README.md b/README.md index 9943bf15f..acab512eb 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # pilosa -Pilosa is a bitmap index database. +Pilosa is a bitmap index. [![Build Status](https://travis-ci.com/pilosa/pilosa.svg?token=Peb4jvQ3kLbjUEhpU5aR&branch=master)](https://travis-ci.com/pilosa/pilosa) @@ -123,60 +123,60 @@ Return the version of Pilosa: $ curl "http://127.0.0.1:10101/version" ``` -Return a list of all databases and frames in the index: +Return a list of all indexes and frames in the index: ```sh $ curl "http://127.0.0.1:10101/schema" ``` -### Database and Frame Schema +### Index and Frame Schema -Before running a query, the corresponding database and frame must be created. Note that database and frame names can contain only lower case letters, numbers, dash (`-`), underscore (`_`) and dot (`.`). +Before running a query, the corresponding index and frame must be created. Note that index and frame names can contain only lower case letters, numbers, dash (`-`), underscore (`_`) and dot (`.`). 
-You can create the database `sample-db` using: +You can create the index `sample-idx` using: ```sh -$ curl -XPOST "http://127.0.0.1:10101/db" \ - -d '{"db": "sample-db"}' +$ curl -XPOST "http://127.0.0.1:10101/index" \ + -d '{"index": "sample-idx"}' ``` -Optionally, you can specify the column label on database creation: +Optionally, you can specify the column label on index creation: ```sh -$ curl -XPOST "http://127.0.0.1:10101/db" \ - -d '{"db": "sample-db", "options": {"columnLabel": "user"}}' +$ curl -XPOST "http://127.0.0.1:10101/index" \ + -d '{"index": "sample-idx", "options": {"columnLabel": "user"}}' ``` The frame `collaboration` may be created using the following call: ```sh $ curl -XPOST "http://127.0.0.1:10101/frame" \ - -d '{"db": "sample-db", "frame": "collaboration"}' + -d '{"index": "sample-idx", "frame": "collaboration"}' ``` It is possible to specify the frame row label on frame creation: ```sh $ curl -XPOST "http://127.0.0.1:10101/frame" \ - -d '{"db": "sample-db", "frame": "collaboration", "options": {"rowLabel": "project"}}' + -d '{"index": "sample-idx", "frame": "collaboration", "options": {"rowLabel": "project"}}' ``` ### Queries Queries to Pilosa require sending a POST request where the query itself is sent as POST data. -You specify the database on which to perform the query with a URL argument `db=database-name`. +You specify the index on which to perform the query with a URL argument `index=index-name`. -In this section, we assume both the database `sample-db` with column label `user` and the frame `collaboration` with row label `project` was created. +In this section, we assume both the index `sample-idx` with column label `user` and the frame `collaboration` with row label `project` were created. 
-A query sent to database `sample-db` will have the following format: +A query sent to index `sample-idx` will have the following format: ```sh -$ curl -X POST "http://127.0.0.1:10101/query?db=sample-db" -d 'Query()' +$ curl -X POST "http://127.0.0.1:10101/query?index=sample-idx" -d 'Query()' ``` The `Query()` object referenced above should be made up of one or more of the query types listed below. So for example, a SetBit() query would look like this: ```sh -$ curl -X POST "http://127.0.0.1:10101/query?db=sample-db" -d 'SetBit(project=10, frame="collaboration", user=1)' +$ curl -X POST "http://127.0.0.1:10101/query?index=sample-idx" -d 'SetBit(project=10, frame="collaboration", user=1)' ``` Query results have the format `{"results":[]}`, where `results` is a list of results for each `Query()`. This @@ -184,7 +184,7 @@ means that you can provide multiple `Query()` objects with each HTTP request and the results of all of the queries. ```sh -$ curl -X POST "http://127.0.0.1:10101/query?db=sample-db" -d 'Query() Query() Query()' +$ curl -X POST "http://127.0.0.1:10101/query?index=sample-idx" -d 'Query() Query() Query()' ``` --- @@ -209,17 +209,17 @@ A return value of `{"results":[true]}` indicates that the bit was toggled from 1 A return value of `{"results":[false]}` indicates that the bit was already set to 0 and therefore nothing changed. 
--- -#### SetBitmapAttrs() +#### SetRowAttrs() ``` -SetBitmapAttrs(project=10, frame="collaboration", stars=123, url="http://projects.pilosa.com/10", active=true) +SetRowAttrs(project=10, frame="collaboration", stars=123, url="http://projects.pilosa.com/10", active=true) ``` Returns `{"results":[null]}` --- -#### SetProfileAttrs() +#### SetColumnAttrs() --- ``` -SetProfileAttrs(user=10, friends=123, username="mrpi", active=true) +SetColumnAttrs(user=10, friends=123, username="mrpi", active=true) ``` Returns `{"results":[null]}` @@ -230,11 +230,11 @@ Returns `{"results":[null]}` Bitmap(project=10, frame="collaboration") ``` Returns `{"results":[{"attrs":{"stars":123, "url":"http://projects.pilosa.com/10", "active":true},"bits":[1,2]}]}` where `attrs` are the -attributes set using `SetBitmapAttrs()` and `bits` are the bits set using `SetBit()`. +attributes set using `SetRowAttrs()` and `bits` are the bits set using `SetBit()`. -In order to return profile attributes attached to the profiles of a bitmap, add `&profiles=true` to the query string. Sample response: +In order to return column attributes attached to the columns of a bitmap, add `&columnAttrs=true` to the query string. Sample response: ``` -{"results":[{"attrs":{},"bits":[10]}],"profiles":[{"user":10,"attrs":{"friends":123, "username":"mrpi", "active":true}}]} +{"results":[{"attrs":{},"bits":[10]}],"columnAttrs":[{"user":10,"attrs":{"friends":123, "username":"mrpi", "active":true}}]} ``` --- diff --git a/attr.go b/attr.go index f619dda9f..3859a1dc7 100644 --- a/attr.go +++ b/attr.go @@ -226,7 +226,7 @@ func (s *AttrStore) BlockData(i uint64) (map[uint64]map[string]interface{}, erro return m, nil } -// txAttrs returns a map of attributes for a bitmap. +// txAttrs returns a map of attributes for an id. 
func txAttrs(tx *bolt.Tx, id uint64) (map[string]interface{}, error) { v := tx.Bucket([]byte("attrs")).Get(u64tob(id)) if v == nil { diff --git a/attr_test.go b/attr_test.go index f026f6244..cfc31d54b 100644 --- a/attr_test.go +++ b/attr_test.go @@ -9,7 +9,7 @@ import ( "github.com/pilosa/pilosa" ) -// Ensure database can set and retrieve profile attributes. +// Ensure database can set and retrieve column attributes. func TestAttrStore_Attrs(t *testing.T) { s := MustOpenAttrStore() defer s.Close() @@ -23,14 +23,14 @@ func TestAttrStore_Attrs(t *testing.T) { t.Fatal(err) } - // Retrieve attributes for profile #1. + // Retrieve attributes for column #1. if m, err := s.Attrs(1); err != nil { t.Fatal(err) } else if !reflect.DeepEqual(m, map[string]interface{}{"A": int64(100), "B": "VALUE", "C": int64(-27)}) { t.Fatalf("unexpected attrs(1): %#v", m) } - // Retrieve attributes for profile #2. + // Retrieve attributes for column #2. if m, err := s.Attrs(2); err != nil { t.Fatal(err) } else if !reflect.DeepEqual(m, map[string]interface{}{"A": int64(200)}) { diff --git a/broadcast.go b/broadcast.go index a3f3cb6c9..9e04d04f6 100644 --- a/broadcast.go +++ b/broadcast.go @@ -34,6 +34,11 @@ func (s *StaticNodeSet) Open() error { return nil } +func (s *StaticNodeSet) Join(nodes []*Node) error { + s.nodes = nodes + return nil +} + // Broadcaster is an interface for broadcasting messages. 
type Broadcaster interface { SendSync(pb proto.Message) error @@ -84,8 +89,8 @@ var NopBroadcastReceiver = &nopBroadcastReceiver{} const ( MessageTypeCreateSlice = 1 - MessageTypeCreateDB = 2 - MessageTypeDeleteDB = 3 + MessageTypeCreateIndex = 2 + MessageTypeDeleteIndex = 3 MessageTypeCreateFrame = 4 MessageTypeDeleteFrame = 5 ) @@ -95,10 +100,10 @@ func MarshalMessage(m proto.Message) ([]byte, error) { switch obj := m.(type) { case *internal.CreateSliceMessage: typ = MessageTypeCreateSlice - case *internal.CreateDBMessage: - typ = MessageTypeCreateDB - case *internal.DeleteDBMessage: - typ = MessageTypeDeleteDB + case *internal.CreateIndexMessage: + typ = MessageTypeCreateIndex + case *internal.DeleteIndexMessage: + typ = MessageTypeDeleteIndex case *internal.CreateFrameMessage: typ = MessageTypeCreateFrame case *internal.DeleteFrameMessage: @@ -120,10 +125,10 @@ func UnmarshalMessage(buf []byte) (proto.Message, error) { switch typ { case MessageTypeCreateSlice: m = &internal.CreateSliceMessage{} - case MessageTypeCreateDB: - m = &internal.CreateDBMessage{} - case MessageTypeDeleteDB: - m = &internal.DeleteDBMessage{} + case MessageTypeCreateIndex: + m = &internal.CreateIndexMessage{} + case MessageTypeDeleteIndex: + m = &internal.DeleteIndexMessage{} case MessageTypeCreateFrame: m = &internal.CreateFrameMessage{} case MessageTypeDeleteFrame: diff --git a/broadcast_test.go b/broadcast_test.go index a2842a919..de38239d4 100644 --- a/broadcast_test.go +++ b/broadcast_test.go @@ -13,12 +13,12 @@ import ( func TestMessage_Marshal(t *testing.T) { testMessageMarshal(t, &internal.CreateSliceMessage{ - DB: "d", + Index: "i", Slice: 8, }) - testMessageMarshal(t, &internal.DeleteDBMessage{ - DB: "d", + testMessageMarshal(t, &internal.DeleteIndexMessage{ + Index: "i", }) } @@ -47,8 +47,8 @@ func TestBroadcast_BroadcastReceiver(t *testing.T) { s.BroadcastReceiver = sbr s.BroadcastReceiver.Start(sbh) - msg := &internal.DeleteDBMessage{ - DB: "d", + msg := 
&internal.DeleteIndexMessage{ + Index: "i", } s.BroadcastReceiver.(*SimpleBroadcastReceiver).Receive(msg) diff --git a/cache.go b/cache.go index 597d65732..5dc4d3188 100644 --- a/cache.go +++ b/cache.go @@ -17,15 +17,15 @@ const ( ThresholdFactor = 1.1 ) -// Cache represents a cache for bitmap counts. +// Cache represents a cache of counts. type Cache interface { - Add(bitmapID uint64, n uint64) - BulkAdd(bitmapID uint64, n uint64) - Get(bitmapID uint64) uint64 + Add(id uint64, n uint64) + BulkAdd(id uint64, n uint64) + Get(id uint64) uint64 Len() int - // Returns a list of all bitmap IDs. - BitmapIDs() []uint64 + // Returns a list of all IDs. + IDs() []uint64 // Updates the cache, if necessary. Invalidate() @@ -58,19 +58,19 @@ func NewLRUCache(maxEntries uint32) *LRUCache { return c } -func (c *LRUCache) BulkAdd(bitmapID, n uint64) { - c.Add(bitmapID, n) +func (c *LRUCache) BulkAdd(id, n uint64) { + c.Add(id, n) } -// Add adds a bitmap to the cache. -func (c *LRUCache) Add(bitmapID, n uint64) { - c.cache.Add(bitmapID, n) - c.counts[bitmapID] = n +// Add adds a count to the cache. +func (c *LRUCache) Add(id, n uint64) { + c.cache.Add(id, n) + c.counts[id] = n } -// Get returns a bitmap with a given id. -func (c *LRUCache) Get(bitmapID uint64) uint64 { - n, _ := c.cache.Get(bitmapID) +// Get returns a count for a given id. +func (c *LRUCache) Get(id uint64) uint64 { + n, _ := c.cache.Get(id) nn, _ := n.(uint64) return nn } @@ -88,8 +88,8 @@ func (c *LRUCache) Recalculate() { c.stats.Gauge("LRUCache", float64(c.cache.Len())) } -// BitmapIDs returns a list of all bitmap IDs in the cache. -func (c *LRUCache) BitmapIDs() []uint64 { +// IDs returns a list of all IDs in the cache. +func (c *LRUCache) IDs() []uint64 { a := make([]uint64, 0, len(c.counts)) for id := range c.counts { a = append(a, id) @@ -153,36 +153,36 @@ func NewRankCache(maxEntries uint32) *RankCache { } } -// Add adds a bitmap to the cache. 
-func (c *RankCache) Add(bitmapID uint64, n uint64) { +// Add adds a count to the cache. +func (c *RankCache) Add(id uint64, n uint64) { c.mu.Lock() defer c.mu.Unlock() - // Ignore if the bit count on the bitmap is below the threshold. + // Ignore if the bit count is below the threshold. if n < c.thresholdValue { return } - c.entries[bitmapID] = n + c.entries[id] = n c.invalidate() } -// BulkAdd adds a bitmap to the cache unsorted. You should Invalidate after completion. -func (c *RankCache) BulkAdd(bitmapID uint64, n uint64) { +// BulkAdd adds a count to the cache unsorted. You should Invalidate after completion. +func (c *RankCache) BulkAdd(id uint64, n uint64) { c.mu.Lock() defer c.mu.Unlock() if n < c.thresholdValue { return } - c.entries[bitmapID] = n + c.entries[id] = n } -// Get returns a bitmap with a given id. -func (c *RankCache) Get(bitmapID uint64) uint64 { +// Get returns a count for a given id. +func (c *RankCache) Get(id uint64) uint64 { c.mu.Lock() defer c.mu.Unlock() - return c.entries[bitmapID] + return c.entries[id] } // Len returns the number of items in the cache. @@ -192,8 +192,8 @@ func (c *RankCache) Len() int { return len(c.entries) } -// BitmapIDs returns a list of all bitmap IDs in the cache. -func (c *RankCache) BitmapIDs() []uint64 { +// IDs returns a list of all IDs in the cache. +func (c *RankCache) IDs() []uint64 { c.mu.Lock() defer c.mu.Unlock() a := make([]uint64, 0, len(c.entries)) @@ -267,7 +267,7 @@ func (c *RankCache) SetStats(s StatsClient) { c.stats = s } -// Top returns an ordered list of bitmaps. +// Top returns an ordered list of pairs. func (c *RankCache) Top() []BitmapPair { return c.rankings } // WriteTo writes the cache to w. @@ -283,7 +283,7 @@ func (c *RankCache) ReadFrom(r io.Reader) (n int64, err error) { // Ensure RankCache implements Cache. var _ Cache = &RankCache{} -// BitmapPair represents a bitmap with an associated identifier. +// BitmapPair represents an id/count pair with an associated identifier. 
type BitmapPair struct { ID uint64 Count uint64 @@ -296,7 +296,7 @@ func (p BitmapPairs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p BitmapPairs) Len() int { return len(p) } func (p BitmapPairs) Less(i, j int) bool { return p[i].Count > p[j].Count } -// Pair holds a bitmap id and its count. +// Pair holds an id/count pair. type Pair struct { ID uint64 `json:"id"` Count uint64 `json:"count"` diff --git a/client.go b/client.go index e4401a69a..7606369ae 100644 --- a/client.go +++ b/client.go @@ -45,18 +45,18 @@ func NewClient(host string) (*Client, error) { // Host returns the host the client was initialized with. func (c *Client) Host() string { return c.host } -// MaxSliceByDatabase returns the number of slices on a server by database. -func (c *Client) MaxSliceByDatabase(ctx context.Context) (map[string]uint64, error) { - return c.maxSliceByDatabase(ctx, false) +// MaxSliceByIndex returns the number of slices on a server by index. +func (c *Client) MaxSliceByIndex(ctx context.Context) (map[string]uint64, error) { + return c.maxSliceByIndex(ctx, false) } -// MaxInverseSliceByDatabase returns the number of inverse slices on a server by database. -func (c *Client) MaxInverseSliceByDatabase(ctx context.Context) (map[string]uint64, error) { - return c.maxSliceByDatabase(ctx, true) +// MaxInverseSliceByIndex returns the number of inverse slices on a server by index. +func (c *Client) MaxInverseSliceByIndex(ctx context.Context) (map[string]uint64, error) { + return c.maxSliceByIndex(ctx, true) } -// maxSliceByDatabase returns the number of slices on a server by database. -func (c *Client) maxSliceByDatabase(ctx context.Context, inverse bool) (map[string]uint64, error) { +// maxSliceByIndex returns the number of slices on a server by index. +func (c *Client) maxSliceByIndex(ctx context.Context, inverse bool) (map[string]uint64, error) { // Execute request against the host. 
u := url.URL{ Scheme: "http", @@ -90,8 +90,8 @@ func (c *Client) maxSliceByDatabase(ctx context.Context, inverse bool) (map[stri return rsp.MaxSlices, nil } -// Schema returns all database and frame schema information. -func (c *Client) Schema(ctx context.Context) ([]*DBInfo, error) { +// Schema returns all index and frame schema information. +func (c *Client) Schema(ctx context.Context) ([]*IndexInfo, error) { // Execute request against the host. u := url.URL{ Scheme: "http", @@ -118,13 +118,13 @@ func (c *Client) Schema(ctx context.Context) ([]*DBInfo, error) { } else if err := json.NewDecoder(resp.Body).Decode(&rsp); err != nil { return nil, fmt.Errorf("json decode: %s", err) } - return rsp.DBs, nil + return rsp.Indexes, nil } -// CreateDB creates a new database on the server. -func (c *Client) CreateDB(ctx context.Context, db string, opt DBOptions) error { +// CreateIndex creates a new index on the server. +func (c *Client) CreateIndex(ctx context.Context, index string, opt IndexOptions) error { // Encode query request. - buf, err := json.Marshal(&postDBRequest{ + buf, err := json.Marshal(&postIndexRequest{ Options: opt, }) if err != nil { @@ -132,7 +132,7 @@ func (c *Client) CreateDB(ctx context.Context, db string, opt DBOptions) error { } // Create URL & HTTP request. - u := url.URL{Scheme: "http", Host: c.host, Path: fmt.Sprintf("/db/%s", db)} + u := url.URL{Scheme: "http", Host: c.host, Path: fmt.Sprintf("/index/%s", index)} req, err := http.NewRequest("POST", u.String(), bytes.NewReader(buf)) if err != nil { return err @@ -159,20 +159,20 @@ func (c *Client) CreateDB(ctx context.Context, db string, opt DBOptions) error { case http.StatusOK: return nil // ok case http.StatusConflict: - return ErrDatabaseExists + return ErrIndexExists default: return errors.New(string(body)) } } // FragmentNodes returns a list of nodes that own a slice. 
-func (c *Client) FragmentNodes(ctx context.Context, db string, slice uint64) ([]*Node, error) { +func (c *Client) FragmentNodes(ctx context.Context, index string, slice uint64) ([]*Node, error) { // Execute request against the host. u := url.URL{ Scheme: "http", Host: c.host, Path: "/fragment/nodes", - RawQuery: (url.Values{"db": {db}, "slice": {strconv.FormatUint(slice, 10)}}).Encode(), + RawQuery: (url.Values{"index": {index}, "slice": {strconv.FormatUint(slice, 10)}}).Encode(), } // Build request. @@ -198,10 +198,10 @@ func (c *Client) FragmentNodes(ctx context.Context, db string, slice uint64) ([] return a, nil } -// ExecuteQuery executes query against db on the server. -func (c *Client) ExecuteQuery(ctx context.Context, db, query string, allowRedirect bool) (result interface{}, err error) { - if db == "" { - return nil, ErrDatabaseRequired +// ExecuteQuery executes query against index on the server. +func (c *Client) ExecuteQuery(ctx context.Context, index, query string, allowRedirect bool) (result interface{}, err error) { + if index == "" { + return nil, ErrIndexRequired } else if query == "" { return nil, ErrQueryRequired } @@ -219,7 +219,7 @@ func (c *Client) ExecuteQuery(ctx context.Context, db, query string, allowRedire u := url.URL{ Scheme: "http", Host: c.host, - Path: fmt.Sprintf("/db/%s/query", db), + Path: fmt.Sprintf("/index/%s/query", index), } req, err := http.NewRequest("POST", u.String(), bytes.NewReader(buf)) if err != nil { @@ -254,14 +254,14 @@ func (c *Client) ExecuteQuery(ctx context.Context, db, query string, allowRedire return qresp, nil } -// ExecutePQL executes query string against db on the server. -func (c *Client) ExecutePQL(ctx context.Context, db, query string) (interface{}, error) { +// ExecutePQL executes query string against index on the server. 
+func (c *Client) ExecutePQL(ctx context.Context, index, query string) (interface{}, error) { u := url.URL{ Scheme: "http", Host: c.host, Path: "/query", RawQuery: url.Values{ - "db": {db}, + "index": {index}, }.Encode(), } @@ -287,20 +287,20 @@ func (c *Client) ExecutePQL(ctx context.Context, db, query string) (interface{}, } // Import bulk imports bits for a single slice to a host. -func (c *Client) Import(ctx context.Context, db, frame string, slice uint64, bits []Bit) error { - if db == "" { - return ErrDatabaseRequired +func (c *Client) Import(ctx context.Context, index, frame string, slice uint64, bits []Bit) error { + if index == "" { + return ErrIndexRequired } else if frame == "" { return ErrFrameRequired } - buf, err := MarshalImportPayload(db, frame, slice, bits) + buf, err := MarshalImportPayload(index, frame, slice, bits) if err != nil { return fmt.Errorf("Error Creating Payload: %s", err) } // Retrieve a list of nodes that own the slice. - nodes, err := c.FragmentNodes(ctx, db, slice) + nodes, err := c.FragmentNodes(ctx, index, slice) if err != nil { return fmt.Errorf("slice nodes: %s", err) } @@ -315,19 +315,19 @@ func (c *Client) Import(ctx context.Context, db, frame string, slice uint64, bit return nil } -func MarshalImportPayload(db, frame string, slice uint64, bits []Bit) ([]byte, error) { - // Separate bitmap and profile IDs to reduce allocations. - bitmapIDs := Bits(bits).BitmapIDs() - profileIDs := Bits(bits).ProfileIDs() +func MarshalImportPayload(index, frame string, slice uint64, bits []Bit) ([]byte, error) { + // Separate row and column IDs to reduce allocations. + rowIDs := Bits(bits).RowIDs() + columnIDs := Bits(bits).ColumnIDs() timestamps := Bits(bits).Timestamps() // Marshal bits to protobufs. 
buf, err := proto.Marshal(&internal.ImportRequest{ - DB: db, + Index: index, Frame: frame, Slice: slice, - BitmapIDs: bitmapIDs, - ProfileIDs: profileIDs, + RowIDs: rowIDs, + ColumnIDs: columnIDs, Timestamps: timestamps, }) if err != nil { @@ -374,15 +374,15 @@ func (c *Client) importNode(ctx context.Context, node *Node, buf []byte) error { } // ExportCSV bulk exports data for a single slice from a host to CSV format. -func (c *Client) ExportCSV(ctx context.Context, db, frame string, slice uint64, w io.Writer) error { - if db == "" { - return ErrDatabaseRequired +func (c *Client) ExportCSV(ctx context.Context, index, frame string, slice uint64, w io.Writer) error { + if index == "" { + return ErrIndexRequired } else if frame == "" { return ErrFrameRequired } // Retrieve a list of nodes that own the slice. - nodes, err := c.FragmentNodes(ctx, db, slice) + nodes, err := c.FragmentNodes(ctx, index, slice) if err != nil { return fmt.Errorf("slice nodes: %s", err) } @@ -392,7 +392,7 @@ func (c *Client) ExportCSV(ctx context.Context, db, frame string, slice uint64, for _, i := range rand.Perm(len(nodes)) { node := nodes[i] - if err := c.exportNodeCSV(ctx, node, db, frame, slice, w); err != nil { + if err := c.exportNodeCSV(ctx, node, index, frame, slice, w); err != nil { e = fmt.Errorf("export node: host=%s, err=%s", node.Host, err) continue } else { @@ -404,14 +404,14 @@ func (c *Client) ExportCSV(ctx context.Context, db, frame string, slice uint64, } // exportNode copies a CSV export from a node to w. -func (c *Client) exportNodeCSV(ctx context.Context, node *Node, db, frame string, slice uint64, w io.Writer) error { +func (c *Client) exportNodeCSV(ctx context.Context, node *Node, index, frame string, slice uint64, w io.Writer) error { // Create URL. 
u := url.URL{ Scheme: "http", Host: node.Host, Path: "/export", RawQuery: url.Values{ - "db": {db}, + "index": {index}, "frame": {frame}, "slice": {strconv.FormatUint(slice, 10)}, }.Encode(), @@ -445,9 +445,9 @@ func (c *Client) exportNodeCSV(ctx context.Context, node *Node, db, frame string } // BackupTo backs up an entire frame from a cluster to w. -func (c *Client) BackupTo(ctx context.Context, w io.Writer, db, frame, view string) error { - if db == "" { - return ErrDatabaseRequired +func (c *Client) BackupTo(ctx context.Context, w io.Writer, index, frame, view string) error { + if index == "" { + return ErrIndexRequired } else if frame == "" { return ErrFrameRequired } @@ -456,14 +456,14 @@ func (c *Client) BackupTo(ctx context.Context, w io.Writer, db, frame, view stri tw := tar.NewWriter(w) // Find the maximum number of slices. - maxSlices, err := c.MaxSliceByDatabase(ctx) + maxSlices, err := c.MaxSliceByIndex(ctx) if err != nil { return fmt.Errorf("slice n: %s", err) } // Backup every slice to the tar file. - for i := uint64(0); i <= maxSlices[db]; i++ { - if err := c.backupSliceTo(ctx, tw, db, frame, view, i); err != nil { + for i := uint64(0); i <= maxSlices[index]; i++ { + if err := c.backupSliceTo(ctx, tw, index, frame, view, i); err != nil { return err } } @@ -477,9 +477,9 @@ func (c *Client) BackupTo(ctx context.Context, w io.Writer, db, frame, view stri } // backupSliceTo backs up a single slice to tw. -func (c *Client) backupSliceTo(ctx context.Context, tw *tar.Writer, db, frame, view string, slice uint64) error { +func (c *Client) backupSliceTo(ctx context.Context, tw *tar.Writer, index, frame, view string, slice uint64) error { // Return error if unable to backup from any slice. 
- r, err := c.BackupSlice(ctx, db, frame, view, slice) + r, err := c.BackupSlice(ctx, index, frame, view, slice) if err != nil { return fmt.Errorf("backup slice: slice=%d, err=%s", slice, err) } else if r == nil { @@ -515,16 +515,16 @@ func (c *Client) backupSliceTo(ctx context.Context, tw *tar.Writer, db, frame, v // BackupSlice retrieves a streaming backup from a single slice. // This function tries slice owners until one succeeds. -func (c *Client) BackupSlice(ctx context.Context, db, frame, view string, slice uint64) (io.ReadCloser, error) { +func (c *Client) BackupSlice(ctx context.Context, index, frame, view string, slice uint64) (io.ReadCloser, error) { // Retrieve a list of nodes that own the slice. - nodes, err := c.FragmentNodes(ctx, db, slice) + nodes, err := c.FragmentNodes(ctx, index, slice) if err != nil { return nil, fmt.Errorf("slice nodes: %s", err) } // Try to backup slice from each one until successful. for _, i := range rand.Perm(len(nodes)) { - r, err := c.backupSliceNode(ctx, db, frame, view, slice, nodes[i]) + r, err := c.backupSliceNode(ctx, index, frame, view, slice, nodes[i]) if err == nil { return r, nil // successfully attached } else if err == ErrFragmentNotFound { @@ -538,13 +538,13 @@ func (c *Client) BackupSlice(ctx context.Context, db, frame, view string, slice return nil, fmt.Errorf("unable to connect to any owner") } -func (c *Client) backupSliceNode(ctx context.Context, db, frame, view string, slice uint64, node *Node) (io.ReadCloser, error) { +func (c *Client) backupSliceNode(ctx context.Context, index, frame, view string, slice uint64, node *Node) (io.ReadCloser, error) { u := url.URL{ Scheme: "http", Host: node.Host, Path: "/fragment/data", RawQuery: url.Values{ - "db": {db}, + "index": {index}, "frame": {frame}, "view": {view}, "slice": {strconv.FormatUint(slice, 10)}, @@ -576,9 +576,9 @@ func (c *Client) backupSliceNode(ctx context.Context, db, frame, view string, sl } // RestoreFrom restores a frame from a backup file to an 
entire cluster. -func (c *Client) RestoreFrom(ctx context.Context, r io.Reader, db, frame, view string) error { - if db == "" { - return ErrDatabaseRequired +func (c *Client) RestoreFrom(ctx context.Context, r io.Reader, index, frame, view string) error { + if index == "" { + return ErrIndexRequired } else if frame == "" { return ErrFrameRequired } @@ -608,16 +608,16 @@ func (c *Client) RestoreFrom(ctx context.Context, r io.Reader, db, frame, view s } // Restore file to all nodes that own it. - if err := c.restoreSliceFrom(ctx, buf.Bytes(), db, frame, view, slice); err != nil { + if err := c.restoreSliceFrom(ctx, buf.Bytes(), index, frame, view, slice); err != nil { return err } } } // restoreSliceFrom restores a single slice to all owning nodes. -func (c *Client) restoreSliceFrom(ctx context.Context, buf []byte, db, frame, view string, slice uint64) error { +func (c *Client) restoreSliceFrom(ctx context.Context, buf []byte, index, frame, view string, slice uint64) error { // Retrieve a list of nodes that own the slice. - nodes, err := c.FragmentNodes(ctx, db, slice) + nodes, err := c.FragmentNodes(ctx, index, slice) if err != nil { return fmt.Errorf("slice nodes: %s", err) } @@ -629,7 +629,7 @@ func (c *Client) restoreSliceFrom(ctx context.Context, buf []byte, db, frame, vi Host: node.Host, Path: "/fragment/data", RawQuery: url.Values{ - "db": {db}, + "index": {index}, "frame": {frame}, "view": {view}, "slice": {strconv.FormatUint(slice, 10)}, @@ -659,9 +659,9 @@ func (c *Client) restoreSliceFrom(ctx context.Context, buf []byte, db, frame, vi } // CreateFrame creates a new frame on the server. -func (c *Client) CreateFrame(ctx context.Context, db, frame string, opt FrameOptions) error { - if db == "" { - return ErrDatabaseRequired +func (c *Client) CreateFrame(ctx context.Context, index, frame string, opt FrameOptions) error { + if index == "" { + return ErrIndexRequired } // Encode query request. 
@@ -673,7 +673,7 @@ func (c *Client) CreateFrame(ctx context.Context, db, frame string, opt FrameOpt } // Create URL & HTTP request. - u := url.URL{Scheme: "http", Host: c.host, Path: fmt.Sprintf("/db/%s/frame/%s", db, frame)} + u := url.URL{Scheme: "http", Host: c.host, Path: fmt.Sprintf("/index/%s/frame/%s", index, frame)} req, err := http.NewRequest("POST", u.String(), bytes.NewReader(buf)) if err != nil { return err @@ -707,11 +707,11 @@ func (c *Client) CreateFrame(ctx context.Context, db, frame string, opt FrameOpt } // RestoreFrame restores an entire frame from a host in another cluster. -func (c *Client) RestoreFrame(ctx context.Context, host, db, frame string) error { +func (c *Client) RestoreFrame(ctx context.Context, host, index, frame string) error { u := url.URL{ Scheme: "http", Host: c.Host(), - Path: fmt.Sprintf("/db/%s/frame/%s/restore", db, frame), + Path: fmt.Sprintf("/index/%s/frame/%s/restore", index, frame), RawQuery: url.Values{ "host": {host}, }.Encode(), @@ -740,12 +740,12 @@ func (c *Client) RestoreFrame(ctx context.Context, host, db, frame string) error } // FrameViews returns a list of view names for a frame. -func (c *Client) FrameViews(ctx context.Context, db, frame string) ([]string, error) { +func (c *Client) FrameViews(ctx context.Context, index, frame string) ([]string, error) { // Create URL & HTTP request. u := url.URL{ Scheme: "http", Host: c.host, - Path: fmt.Sprintf("/db/%s/frame/%s/views", db, frame), + Path: fmt.Sprintf("/index/%s/frame/%s/views", index, frame), } req, err := http.NewRequest("GET", u.String(), nil) if err != nil { @@ -780,13 +780,13 @@ func (c *Client) FrameViews(ctx context.Context, db, frame string) ([]string, er // FragmentBlocks returns a list of block checksums for a fragment on a host. // Only returns blocks which contain data. 
-func (c *Client) FragmentBlocks(ctx context.Context, db, frame, view string, slice uint64) ([]FragmentBlock, error) { +func (c *Client) FragmentBlocks(ctx context.Context, index, frame, view string, slice uint64) ([]FragmentBlock, error) { u := url.URL{ Scheme: "http", Host: c.host, Path: "/fragment/blocks", RawQuery: url.Values{ - "db": {db}, + "index": {index}, "frame": {frame}, "view": {view}, "slice": {strconv.FormatUint(slice, 10)}, @@ -823,10 +823,10 @@ func (c *Client) FragmentBlocks(ctx context.Context, db, frame, view string, sli return rsp.Blocks, nil } -// BlockData returns bitmap/profile id pairs for a block. -func (c *Client) BlockData(ctx context.Context, db, frame, view string, slice uint64, block int) ([]uint64, []uint64, error) { +// BlockData returns row/column id pairs for a block. +func (c *Client) BlockData(ctx context.Context, index, frame, view string, slice uint64, block int) ([]uint64, []uint64, error) { buf, err := proto.Marshal(&internal.BlockDataRequest{ - DB: db, + Index: index, Frame: frame, View: view, Slice: slice, @@ -867,19 +867,19 @@ func (c *Client) BlockData(ctx context.Context, db, frame, view string, slice ui } else if err := proto.Unmarshal(body, &rsp); err != nil { return nil, nil, err } - return rsp.BitmapIDs, rsp.ProfileIDs, nil + return rsp.RowIDs, rsp.ColumnIDs, nil } -// ProfileAttrDiff returns data from differing blocks on a remote host. -func (c *Client) ProfileAttrDiff(ctx context.Context, db string, blks []AttrBlock) (map[uint64]map[string]interface{}, error) { +// ColumnAttrDiff returns data from differing blocks on a remote host. +func (c *Client) ColumnAttrDiff(ctx context.Context, index string, blks []AttrBlock) (map[uint64]map[string]interface{}, error) { u := url.URL{ Scheme: "http", Host: c.host, - Path: fmt.Sprintf("/db/%s/attr/diff", db), + Path: fmt.Sprintf("/index/%s/attr/diff", index), } // Encode request. 
- buf, err := json.Marshal(postDBAttrDiffRequest{Blocks: blks}) + buf, err := json.Marshal(postIndexAttrDiffRequest{Blocks: blks}) if err != nil { return nil, err } @@ -906,19 +906,19 @@ func (c *Client) ProfileAttrDiff(ctx context.Context, db string, blks []AttrBloc } // Decode response object. - var rsp postDBAttrDiffResponse + var rsp postIndexAttrDiffResponse if err := json.NewDecoder(resp.Body).Decode(&rsp); err != nil { return nil, err } return rsp.Attrs, nil } -// BitmapAttrDiff returns data from differing blocks on a remote host. -func (c *Client) BitmapAttrDiff(ctx context.Context, db, frame string, blks []AttrBlock) (map[uint64]map[string]interface{}, error) { +// RowAttrDiff returns data from differing blocks on a remote host. +func (c *Client) RowAttrDiff(ctx context.Context, index, frame string, blks []AttrBlock) (map[uint64]map[string]interface{}, error) { u := url.URL{ Scheme: "http", Host: c.host, - Path: fmt.Sprintf("/db/%s/frame/%s/attr/diff", db, frame), + Path: fmt.Sprintf("/index/%s/frame/%s/attr/diff", index, frame), } // Encode request. @@ -960,8 +960,8 @@ func (c *Client) BitmapAttrDiff(ctx context.Context, db, frame string, blks []At // Bit represents the location of a single bit. type Bit struct { - BitmapID uint64 - ProfileID uint64 + RowID uint64 + ColumnID uint64 Timestamp int64 } @@ -972,29 +972,29 @@ func (p Bits) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p Bits) Len() int { return len(p) } func (p Bits) Less(i, j int) bool { - if p[i].BitmapID == p[j].BitmapID { - if p[i].ProfileID < p[j].ProfileID { + if p[i].RowID == p[j].RowID { + if p[i].ColumnID < p[j].ColumnID { return p[i].Timestamp < p[j].Timestamp } - return p[i].ProfileID < p[j].ProfileID + return p[i].ColumnID < p[j].ColumnID } - return p[i].BitmapID < p[j].BitmapID + return p[i].RowID < p[j].RowID } -// BitmapIDs returns a slice of all the bitmap IDs. -func (a Bits) BitmapIDs() []uint64 { +// RowIDs returns a slice of all the row IDs. 
+func (a Bits) RowIDs() []uint64 { other := make([]uint64, len(a)) for i := range a { - other[i] = a[i].BitmapID + other[i] = a[i].RowID } return other } -// ProfileIDs returns a slice of all the profile IDs. -func (a Bits) ProfileIDs() []uint64 { +// ColumnIDs returns a slice of all the column IDs. +func (a Bits) ColumnIDs() []uint64 { other := make([]uint64, len(a)) for i := range a { - other[i] = a[i].ProfileID + other[i] = a[i].ColumnID } return other } @@ -1012,7 +1012,7 @@ func (a Bits) Timestamps() []int64 { func (a Bits) GroupBySlice() map[uint64][]Bit { m := make(map[uint64][]Bit) for _, bit := range a { - slice := bit.ProfileID / SliceWidth + slice := bit.ColumnID / SliceWidth m[slice] = append(m[slice], bit) } @@ -1030,7 +1030,7 @@ type BitsByPos []Bit func (p BitsByPos) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p BitsByPos) Len() int { return len(p) } func (p BitsByPos) Less(i, j int) bool { - p0, p1 := Pos(p[i].BitmapID, p[i].ProfileID), Pos(p[j].BitmapID, p[j].ProfileID) + p0, p1 := Pos(p[i].RowID, p[i].ColumnID), Pos(p[j].RowID, p[j].ColumnID) if p0 == p1 { return p[i].Timestamp < p[j].Timestamp } diff --git a/client_test.go b/client_test.go index f78ee69c4..70676e399 100644 --- a/client_test.go +++ b/client_test.go @@ -13,81 +13,81 @@ import ( "github.com/pilosa/pilosa/pql" ) -func createCluster(c *pilosa.Cluster) ([]*Server, []*Index) { +func createCluster(c *pilosa.Cluster) ([]*Server, []*Holder) { numNodes := len(c.Nodes) - idx := make([]*Index, numNodes) + hldr := make([]*Holder, numNodes) server := make([]*Server, numNodes) for i := 0; i < numNodes; i++ { - idx[i] = MustOpenIndex() + hldr[i] = MustOpenHolder() server[i] = NewServer() server[i].Handler.Host = server[i].Host() server[i].Handler.Cluster = c server[i].Handler.Cluster.Nodes[i].Host = server[i].Host() - server[i].Handler.Index = idx[i].Index + server[i].Handler.Holder = hldr[i].Holder } - return server, idx + return server, hldr } -// Test distributed TopN Bitmap count across 
3 nodes. +// Test distributed TopN Row count across 3 nodes. func TestClient_MultiNode(t *testing.T) { cluster := NewCluster(3) - s, idx := createCluster(cluster) + s, hldr := createCluster(cluster) for i := 0; i < len(cluster.Nodes); i++ { - defer idx[i].Close() + defer hldr[i].Close() defer s[i].Close() } - s[0].Handler.Executor.ExecuteFn = func(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { + s[0].Handler.Executor.ExecuteFn = func(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { e := pilosa.NewExecutor() - e.Index = idx[0].Index + e.Holder = hldr[0].Holder e.Host = cluster.Nodes[0].Host e.Cluster = cluster - return e.Execute(ctx, db, query, slices, opt) + return e.Execute(ctx, index, query, slices, opt) } - s[1].Handler.Executor.ExecuteFn = func(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { + s[1].Handler.Executor.ExecuteFn = func(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { e := pilosa.NewExecutor() - e.Index = idx[1].Index + e.Holder = hldr[1].Holder e.Host = cluster.Nodes[1].Host e.Cluster = cluster - return e.Execute(ctx, db, query, slices, opt) + return e.Execute(ctx, index, query, slices, opt) } - s[2].Handler.Executor.ExecuteFn = func(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { + s[2].Handler.Executor.ExecuteFn = func(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { e := pilosa.NewExecutor() - e.Index = idx[2].Index + e.Holder = hldr[2].Holder e.Host = cluster.Nodes[2].Host e.Cluster = cluster - return e.Execute(ctx, db, query, slices, opt) + return e.Execute(ctx, index, query, slices, opt) } // Create a dispersed set of bitmaps 
across 3 nodes such that each individual node and slice width increment would reveal a different TopN. - idx[0].MustCreateFragmentIfNotExists("d", "f.n", pilosa.ViewStandard, 0).MustSetBits(99, 1, 2, 3, 4) - idx[0].MustCreateFragmentIfNotExists("d", "f.n", pilosa.ViewStandard, 0).MustSetBits(100, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10) - idx[0].MustCreateFragmentIfNotExists("d", "f.n", pilosa.ViewStandard, 0).MustSetBits(98, 1, 2, 3, 4, 5, 6) - idx[0].MustCreateFragmentIfNotExists("d", "f.n", pilosa.ViewStandard, 0).MustSetBits(1, 4) - idx[0].MustCreateFragmentIfNotExists("d", "f.n", pilosa.ViewStandard, 0).MustSetBits(22, 1, 2, 3, 4, 5) - - idx[1].MustCreateFragmentIfNotExists("d", "f.n", pilosa.ViewStandard, 10).MustSetBits(100, (SliceWidth*10)+10) - idx[1].MustCreateFragmentIfNotExists("d", "f.n", pilosa.ViewStandard, 10).MustSetBits(4, (SliceWidth*10)+10, (SliceWidth*10)+11, (SliceWidth*10)+12) - idx[1].MustCreateFragmentIfNotExists("d", "f.n", pilosa.ViewStandard, 10).MustSetBits(4, (SliceWidth*10)+10, (SliceWidth*10)+11, (SliceWidth*10)+12, (SliceWidth*10)+13, (SliceWidth*10)+14, (SliceWidth*10)+15) - idx[1].MustCreateFragmentIfNotExists("d", "f.n", pilosa.ViewStandard, 10).MustSetBits(2, (SliceWidth*10)+1, (SliceWidth*10)+2, (SliceWidth*10)+3, (SliceWidth*10)+4) - idx[1].MustCreateFragmentIfNotExists("d", "f.n", pilosa.ViewStandard, 10).MustSetBits(3, (SliceWidth*10)+1, (SliceWidth*10)+2, (SliceWidth*10)+3, (SliceWidth*10)+4, (SliceWidth*10)+5) - idx[1].MustCreateFragmentIfNotExists("d", "f.n", pilosa.ViewStandard, 10).MustSetBits(22, (SliceWidth*10)+1, (SliceWidth*10)+2, (SliceWidth*10)+10) - - idx[2].MustCreateFragmentIfNotExists("d", "f.n", pilosa.ViewStandard, 6).MustSetBits(24, (SliceWidth*6)+10, (SliceWidth*6)+11, (SliceWidth*6)+12, (SliceWidth*6)+13, (SliceWidth*6)+14) - idx[2].MustCreateFragmentIfNotExists("d", "f.n", pilosa.ViewStandard, 6).MustSetBits(20, (SliceWidth*6)+10, (SliceWidth*6)+11, (SliceWidth*6)+12, (SliceWidth*6)+13) - 
idx[2].MustCreateFragmentIfNotExists("d", "f.n", pilosa.ViewStandard, 6).MustSetBits(21, (SliceWidth*6)+10) - idx[2].MustCreateFragmentIfNotExists("d", "f.n", pilosa.ViewStandard, 6).MustSetBits(100, (SliceWidth*6)+10) - idx[2].MustCreateFragmentIfNotExists("d", "f.n", pilosa.ViewStandard, 6).MustSetBits(99, (SliceWidth*6)+10, (SliceWidth*6)+11, (SliceWidth*6)+12) - idx[2].MustCreateFragmentIfNotExists("d", "f.n", pilosa.ViewStandard, 6).MustSetBits(98, (SliceWidth*6)+10, (SliceWidth*6)+11) - idx[2].MustCreateFragmentIfNotExists("d", "f.n", pilosa.ViewStandard, 6).MustSetBits(22, (SliceWidth*6)+10, (SliceWidth*6)+11, (SliceWidth*6)+12) + hldr[0].MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 9).MustSetBits(100, (SliceWidth*9)+10) + hldr[0].MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 9).MustSetBits(4, (SliceWidth*9)+10, (SliceWidth*9)+11, (SliceWidth*9)+12) + hldr[0].MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 9).MustSetBits(4, (SliceWidth*9)+10, (SliceWidth*9)+11, (SliceWidth*9)+12, (SliceWidth*9)+13, (SliceWidth*9)+14, (SliceWidth*9)+15) + hldr[0].MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 9).MustSetBits(2, (SliceWidth*9)+1, (SliceWidth*9)+2, (SliceWidth*9)+3, (SliceWidth*9)+4) + hldr[0].MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 9).MustSetBits(3, (SliceWidth*9)+1, (SliceWidth*9)+2, (SliceWidth*9)+3, (SliceWidth*9)+4, (SliceWidth*9)+5) + hldr[0].MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 9).MustSetBits(22, (SliceWidth*9)+1, (SliceWidth*9)+2, (SliceWidth*9)+10) + + hldr[2].MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 6).MustSetBits(24, (SliceWidth*6)+10, (SliceWidth*6)+11, (SliceWidth*6)+12, (SliceWidth*6)+13, (SliceWidth*6)+14) + hldr[1].MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).MustSetBits(99, 1, 2, 3, 4) + hldr[1].MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).MustSetBits(100, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10) 
+ hldr[1].MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).MustSetBits(98, 1, 2, 3, 4, 5, 6) + hldr[1].MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).MustSetBits(1, 4) + hldr[1].MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).MustSetBits(22, 1, 2, 3, 4, 5) + + hldr[2].MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 6).MustSetBits(20, (SliceWidth*6)+10, (SliceWidth*6)+11, (SliceWidth*6)+12, (SliceWidth*6)+13) + hldr[2].MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 6).MustSetBits(21, (SliceWidth*6)+10) + hldr[2].MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 6).MustSetBits(100, (SliceWidth*6)+10) + hldr[2].MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 6).MustSetBits(99, (SliceWidth*6)+10, (SliceWidth*6)+11, (SliceWidth*6)+12) + hldr[2].MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 6).MustSetBits(98, (SliceWidth*6)+10, (SliceWidth*6)+11) + hldr[2].MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 6).MustSetBits(22, (SliceWidth*6)+10, (SliceWidth*6)+11, (SliceWidth*6)+12) // Rebuild the RankCache. // We have to do this to avoid the 10-second cache invalidation delay // built into cache.Invalidate() - idx[0].MustCreateFragmentIfNotExists("d", "f.n", pilosa.ViewStandard, 0).RecalculateCache() - idx[1].MustCreateFragmentIfNotExists("d", "f.n", pilosa.ViewStandard, 10).RecalculateCache() - idx[2].MustCreateFragmentIfNotExists("d", "f.n", pilosa.ViewStandard, 6).RecalculateCache() + hldr[0].MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).RecalculateCache() + hldr[1].MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 10).RecalculateCache() + hldr[2].MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 6).RecalculateCache() // Connect to each node to compare results. 
client := make([]*Client, 3) @@ -96,9 +96,9 @@ func TestClient_MultiNode(t *testing.T) { client[2] = MustNewClient(s[0].Host()) topN := 4 - q := fmt.Sprintf(`TopN(frame="%s", n=%d)`, "f.n", topN) + q := fmt.Sprintf(`TopN(frame="%s", n=%d)`, "f", topN) - result, err := client[0].ExecuteQuery(context.Background(), "d", q, true) + result, err := client[0].ExecuteQuery(context.Background(), "i", q, true) if err != nil { t.Fatal(err) } @@ -106,17 +106,17 @@ func TestClient_MultiNode(t *testing.T) { // Check the results before every node has the correct max slice value. pairs := result.(internal.QueryResponse).Results[0].Pairs for _, pair := range pairs { - if pair.Key == 22 && pair.Count != 5 { + if pair.Key == 22 && pair.Count != 11 { t.Fatalf("Invalid Cluster wide MaxSlice prevents accurate calculation of %s", pair) } } // Set max slice to correct value. - idx[0].DB("d").SetRemoteMaxSlice(10) - idx[1].DB("d").SetRemoteMaxSlice(10) - idx[2].DB("d").SetRemoteMaxSlice(10) + hldr[0].Index("i").SetRemoteMaxSlice(10) + hldr[1].Index("i").SetRemoteMaxSlice(10) + hldr[2].Index("i").SetRemoteMaxSlice(10) - result, err = client[0].ExecuteQuery(context.Background(), "d", q, true) + result, err = client[0].ExecuteQuery(context.Background(), "i", q, true) if err != nil { t.Fatal(err) } @@ -136,11 +136,11 @@ func TestClient_MultiNode(t *testing.T) { t.Fatalf("Invalid TopN result set: %s", spew.Sdump(result)) } - result1, err := client[1].ExecuteQuery(context.Background(), "d", q, true) + result1, err := client[1].ExecuteQuery(context.Background(), "i", q, true) if err != nil { t.Fatal(err) } - result2, err := client[2].ExecuteQuery(context.Background(), "d", q, true) + result2, err := client[2].ExecuteQuery(context.Background(), "i", q, true) if err != nil { t.Fatal(err) } @@ -157,49 +157,49 @@ func TestClient_MultiNode(t *testing.T) { // Ensure client can bulk import data. 
func TestClient_Import(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() + hldr := MustOpenHolder() + defer hldr.Close() // Load bitmap into cache to ensure cache gets updated. - f := idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0) - f.Bitmap(0) + f := hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0) + f.Row(0) s := NewServer() defer s.Close() s.Handler.Host = s.Host() s.Handler.Cluster = NewCluster(1) s.Handler.Cluster.Nodes[0].Host = s.Host() - s.Handler.Index = idx.Index + s.Handler.Holder = hldr.Holder // Send import request. c := MustNewClient(s.Host()) - if err := c.Import(context.Background(), "d", "f", 0, []pilosa.Bit{ - {BitmapID: 0, ProfileID: 1}, - {BitmapID: 0, ProfileID: 5}, - {BitmapID: 200, ProfileID: 6}, + if err := c.Import(context.Background(), "i", "f", 0, []pilosa.Bit{ + {RowID: 0, ColumnID: 1}, + {RowID: 0, ColumnID: 5}, + {RowID: 200, ColumnID: 6}, }); err != nil { t.Fatal(err) } // Verify data. - if a := f.Bitmap(0).Bits(); !reflect.DeepEqual(a, []uint64{1, 5}) { + if a := f.Row(0).Bits(); !reflect.DeepEqual(a, []uint64{1, 5}) { t.Fatalf("unexpected bits: %+v", a) } - if a := f.Bitmap(200).Bits(); !reflect.DeepEqual(a, []uint64{6}) { + if a := f.Row(200).Bits(); !reflect.DeepEqual(a, []uint64{6}) { t.Fatalf("unexpected bits: %+v", a) } } // Ensure client can bulk import data to an inverse frame. 
func TestClient_ImportInverseEnabled(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() + hldr := MustOpenHolder() + defer hldr.Close() - d := idx.MustCreateDBIfNotExists("d", pilosa.DBOptions{}) + idx := hldr.MustCreateIndexIfNotExists("i", pilosa.IndexOptions{}) frameOpts := pilosa.FrameOptions{ InverseEnabled: true, } - frame, err := d.CreateFrameIfNotExists("f", frameOpts) + frame, err := idx.CreateFrameIfNotExists("f", frameOpts) if err != nil { panic(err) } @@ -213,65 +213,65 @@ func TestClient_ImportInverseEnabled(t *testing.T) { } // Load bitmap into cache to ensure cache gets updated. - f.Bitmap(0) + f.Row(0) s := NewServer() defer s.Close() s.Handler.Host = s.Host() s.Handler.Cluster = NewCluster(1) s.Handler.Cluster.Nodes[0].Host = s.Host() - s.Handler.Index = idx.Index + s.Handler.Holder = hldr.Holder // Send import request. c := MustNewClient(s.Host()) - if err := c.Import(context.Background(), "d", "f", 0, []pilosa.Bit{ - {BitmapID: 0, ProfileID: 1}, - {BitmapID: 0, ProfileID: 5}, - {BitmapID: 200, ProfileID: 5}, - {BitmapID: 200, ProfileID: 6}, + if err := c.Import(context.Background(), "i", "f", 0, []pilosa.Bit{ + {RowID: 0, ColumnID: 1}, + {RowID: 0, ColumnID: 5}, + {RowID: 200, ColumnID: 5}, + {RowID: 200, ColumnID: 6}, }); err != nil { t.Fatal(err) } // Verify data. - if a := f.Bitmap(1).Bits(); !reflect.DeepEqual(a, []uint64{0}) { + if a := f.Row(1).Bits(); !reflect.DeepEqual(a, []uint64{0}) { t.Fatalf("unexpected bits: %+v", a) } - if a := f.Bitmap(5).Bits(); !reflect.DeepEqual(a, []uint64{0, 200}) { + if a := f.Row(5).Bits(); !reflect.DeepEqual(a, []uint64{0, 200}) { t.Fatalf("unexpected bits: %+v", a) } - if a := f.Bitmap(6).Bits(); !reflect.DeepEqual(a, []uint64{200}) { + if a := f.Row(6).Bits(); !reflect.DeepEqual(a, []uint64{200}) { t.Fatalf("unexpected bits: %+v", a) } } // Ensure client backup and restore a frame. 
func TestClient_BackupRestore(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() + hldr := MustOpenHolder() + defer hldr.Close() - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0).MustSetBits(100, 1, 2, 3, SliceWidth-1) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).MustSetBits(100, SliceWidth, SliceWidth+2) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 5).MustSetBits(100, (5*SliceWidth)+1) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0).MustSetBits(200, 20000) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).MustSetBits(100, 1, 2, 3, SliceWidth-1) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).MustSetBits(100, SliceWidth, SliceWidth+2) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 5).MustSetBits(100, (5*SliceWidth)+1) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).MustSetBits(200, 20000) s := NewServer() defer s.Close() s.Handler.Host = s.Host() s.Handler.Cluster = NewCluster(1) s.Handler.Cluster.Nodes[0].Host = s.Host() - s.Handler.Index = idx.Index + s.Handler.Holder = hldr.Holder c := MustNewClient(s.Host()) // Backup from frame. var buf bytes.Buffer - if err := c.BackupTo(context.Background(), &buf, "d", "f", pilosa.ViewStandard); err != nil { + if err := c.BackupTo(context.Background(), &buf, "i", "f", pilosa.ViewStandard); err != nil { t.Fatal(err) } // Restore to a different frame. - if _, err := idx.MustCreateDBIfNotExists("x", pilosa.DBOptions{}).CreateFrameIfNotExists("y", pilosa.FrameOptions{}); err != nil { + if _, err := hldr.MustCreateIndexIfNotExists("x", pilosa.IndexOptions{}).CreateFrameIfNotExists("y", pilosa.FrameOptions{}); err != nil { t.Fatal(err) } if err := c.RestoreFrom(context.Background(), &buf, "x", "y", pilosa.ViewStandard); err != nil { @@ -279,42 +279,42 @@ func TestClient_BackupRestore(t *testing.T) { } // Verify data. 
- if a := idx.Fragment("x", "y", pilosa.ViewStandard, 0).Bitmap(100).Bits(); !reflect.DeepEqual(a, []uint64{1, 2, 3, SliceWidth - 1}) { + if a := hldr.Fragment("x", "y", pilosa.ViewStandard, 0).Row(100).Bits(); !reflect.DeepEqual(a, []uint64{1, 2, 3, SliceWidth - 1}) { t.Fatalf("unexpected bits(0): %+v", a) } - if a := idx.Fragment("x", "y", pilosa.ViewStandard, 1).Bitmap(100).Bits(); !reflect.DeepEqual(a, []uint64{SliceWidth, SliceWidth + 2}) { + if a := hldr.Fragment("x", "y", pilosa.ViewStandard, 1).Row(100).Bits(); !reflect.DeepEqual(a, []uint64{SliceWidth, SliceWidth + 2}) { t.Fatalf("unexpected bits(0): %+v", a) } - if a := idx.Fragment("x", "y", pilosa.ViewStandard, 5).Bitmap(100).Bits(); !reflect.DeepEqual(a, []uint64{(5 * SliceWidth) + 1}) { + if a := hldr.Fragment("x", "y", pilosa.ViewStandard, 5).Row(100).Bits(); !reflect.DeepEqual(a, []uint64{(5 * SliceWidth) + 1}) { t.Fatalf("unexpected bits(0): %+v", a) } - if a := idx.Fragment("x", "y", pilosa.ViewStandard, 0).Bitmap(200).Bits(); !reflect.DeepEqual(a, []uint64{20000}) { + if a := hldr.Fragment("x", "y", pilosa.ViewStandard, 0).Row(200).Bits(); !reflect.DeepEqual(a, []uint64{20000}) { t.Fatalf("unexpected bits: %+v", a) } } // Ensure client can retrieve a list of all checksums for blocks in a fragment. func TestClient_FragmentBlocks(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() + hldr := MustOpenHolder() + defer hldr.Close() // Set two bits on blocks 0 & 3. - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0).SetBit(0, 1) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0).SetBit(pilosa.HashBlockSize*3, 100) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).SetBit(0, 1) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).SetBit(pilosa.HashBlockSize*3, 100) // Set a bit on a different slice. 
- idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).SetBit(0, 1) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).SetBit(0, 1) s := NewServer() defer s.Close() s.Handler.Host = s.Host() s.Handler.Cluster = NewCluster(1) s.Handler.Cluster.Nodes[0].Host = s.Host() - s.Handler.Index = idx.Index + s.Handler.Holder = hldr.Holder // Retrieve blocks. c := MustNewClient(s.Host()) - blocks, err := c.FragmentBlocks(context.Background(), "d", "f", pilosa.ViewStandard, 0) + blocks, err := c.FragmentBlocks(context.Background(), "i", "f", pilosa.ViewStandard, 0) if err != nil { t.Fatal(err) } else if len(blocks) != 2 { @@ -326,7 +326,7 @@ func TestClient_FragmentBlocks(t *testing.T) { } // Verify data matches local blocks. - if a := idx.Fragment("d", "f", pilosa.ViewStandard, 0).Blocks(); !reflect.DeepEqual(a, blocks) { + if a := hldr.Fragment("i", "f", pilosa.ViewStandard, 0).Blocks(); !reflect.DeepEqual(a, blocks) { t.Fatalf("blocks mismatch:\n\nexp=%s\n\ngot=%s\n\n", spew.Sdump(a), spew.Sdump(blocks)) } } diff --git a/cluster.go b/cluster.go index 0e874e06e..ac5b2ec6f 100644 --- a/cluster.go +++ b/cluster.go @@ -15,15 +15,30 @@ const ( // DefaultReplicaN is the default number of replicas per partition. DefaultReplicaN = 1 - // HealthStatus is the return value of the /health endpoint for a node in the cluster. - HealthStatusUp = "UP" - HealthStatusDown = "DOWN" + // NodeState represents node state returned in /status endpoint for a node in the cluster. + NodeStateUp = "UP" + NodeStateDown = "DOWN" ) // Node represents a node in the cluster. type Node struct { Host string `json:"host"` InternalHost string `json:"internalHost"` + + status *internal.NodeStatus `json:"status"` +} + +// SetStatus sets the NodeStatus. +func (n *Node) SetStatus(s *internal.NodeStatus) { + n.status = s +} + +// SetState sets the Node.status.state. 
+func (n *Node) SetState(s string) { + if n.status == nil { + n.status = &internal.NodeStatus{} + } + n.status.State = s } // Nodes represents a list of nodes. @@ -89,7 +104,6 @@ func (a Nodes) Clone() []*Node { // Cluster represents a collection of nodes. type Cluster struct { - mu sync.Mutex Nodes []*Node NodeSet NodeSet @@ -101,9 +115,6 @@ type Cluster struct { // The number of replicas a partition has. ReplicaN int - - // Current state of nodes in the cluster - NodeState map[string]*internal.NodeState } // NewCluster returns a new instance of Cluster with defaults. @@ -112,7 +123,6 @@ func NewCluster() *Cluster { Hasher: &jmphasher{}, PartitionN: DefaultPartitionN, ReplicaN: DefaultReplicaN, - NodeState: make(map[string]*internal.NodeState), } } @@ -128,21 +138,37 @@ func (c *Cluster) NodeSetHosts() []string { return a } -// Health returns a map of nodes in the cluster with each node's state (UP/DOWN) as the value. -func (c *Cluster) Health() map[string]string { +// NodeStates returns a map of nodes in the cluster with each node's state (UP/DOWN) as the value. +func (c *Cluster) NodeStates() map[string]string { h := make(map[string]string) for _, n := range c.Nodes { - h[n.Host] = HealthStatusDown + h[n.Host] = NodeStateDown } // we are assuming that NodeSetHosts is a subset of c.Nodes for _, m := range c.NodeSetHosts() { if _, ok := h[m]; ok { - h[m] = HealthStatusUp + h[m] = NodeStateUp } } return h } +// Status returns the internal ClusterStatus representation. +func (c *Cluster) Status() *internal.ClusterStatus { + return &internal.ClusterStatus{ + Nodes: encodeClusterStatus(c.Nodes), + } +} + +// encodeClusterStatus converts a into its internal representation. +func encodeClusterStatus(a []*Node) []*internal.NodeStatus { + other := make([]*internal.NodeStatus, len(a)) + for i := range a { + other[i] = a[i].status + } + return other +} + // NodeByHost returns a node reference by host. 
func (c *Cluster) NodeByHost(host string) *Node { for _, n := range c.Nodes { @@ -154,25 +180,25 @@ func (c *Cluster) NodeByHost(host string) *Node { } // Partition returns the partition that a slice belongs to. -func (c *Cluster) Partition(db string, slice uint64) int { +func (c *Cluster) Partition(index string, slice uint64) int { var buf [8]byte binary.BigEndian.PutUint64(buf[:], slice) // Hash the bytes and mod by partition count. h := fnv.New64a() - h.Write([]byte(db)) + h.Write([]byte(index)) h.Write(buf[:]) return int(h.Sum64() % uint64(c.PartitionN)) } // FragmentNodes returns a list of nodes that own a fragment. -func (c *Cluster) FragmentNodes(db string, slice uint64) []*Node { - return c.PartitionNodes(c.Partition(db, slice)) +func (c *Cluster) FragmentNodes(index string, slice uint64) []*Node { + return c.PartitionNodes(c.Partition(index, slice)) } // OwnsFragment returns true if a host owns a fragment. -func (c *Cluster) OwnsFragment(host string, db string, slice uint64) bool { - return Nodes(c.FragmentNodes(db, slice)).ContainsHost(host) +func (c *Cluster) OwnsFragment(host string, index string, slice uint64) bool { + return Nodes(c.FragmentNodes(index, slice)).ContainsHost(host) } // PartitionNodes returns a list of nodes that own a partition. @@ -187,25 +213,25 @@ func (c *Cluster) PartitionNodes(partitionID int) []*Node { } // Determine primary owner node. - index := c.Hasher.Hash(uint64(partitionID), len(c.Nodes)) + node_index := c.Hasher.Hash(uint64(partitionID), len(c.Nodes)) // Collect nodes around the ring. 
nodes := make([]*Node, replicaN) for i := 0; i < replicaN; i++ { - nodes[i] = c.Nodes[(index+i)%len(c.Nodes)] + nodes[i] = c.Nodes[(node_index+i)%len(c.Nodes)] } return nodes } -// OwnsSlices find the set of slices owned by the node per DB -func (c *Cluster) OwnsSlices(db string, maxSlice uint64, host string) []uint64 { +// OwnsSlices find the set of slices owned by the node per Index +func (c *Cluster) OwnsSlices(index string, maxSlice uint64, host string) []uint64 { var slices []uint64 for i := uint64(0); i <= maxSlice; i++ { - p := c.Partition(db, i) + p := c.Partition(index, i) // Determine primary owner node. - index := c.Hasher.Hash(uint64(p), len(c.Nodes)) - if c.Nodes[index].Host == host { + node_index := c.Hasher.Hash(uint64(p), len(c.Nodes)) + if c.Nodes[node_index].Host == host { slices = append(slices, i) } @@ -213,22 +239,6 @@ func (c *Cluster) OwnsSlices(db string, maxSlice uint64, host string) []uint64 { return slices } -// SetNodeState stores the remote node states transmitted through gossip -func (c *Cluster) SetNodeState(state *internal.NodeState) { - c.mu.Lock() - defer c.mu.Unlock() - - c.NodeState[state.Host] = state -} - -// GetNodeState stores the remote node states transmitted through gossip -func (c *Cluster) GetNodeState(host string) *internal.NodeState { - c.mu.Lock() - defer c.mu.Unlock() - - return c.NodeState[host] -} - // Hasher represents an interface to hash integers into buckets. type Hasher interface { // Hashes the key into a number between [0,N). diff --git a/cluster_test.go b/cluster_test.go index 9dbaa420d..4c96cd5d9 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -37,11 +37,11 @@ func TestCluster_Owners(t *testing.T) { // Ensure the partitioner can assign a fragment to a partition. 
func TestCluster_Partition(t *testing.T) { - if err := quick.Check(func(db string, slice uint64, partitionN int) bool { + if err := quick.Check(func(index string, slice uint64, partitionN int) bool { c := pilosa.NewCluster() c.PartitionN = partitionN - partitionID := c.Partition(db, slice) + partitionID := c.Partition(index, slice) if partitionID < 0 || partitionID >= partitionN { t.Errorf("partition out of range: slice=%d, p=%d, n=%d", slice, partitionID, partitionN) } @@ -89,7 +89,7 @@ func TestCluster_NodeSetHosts(t *testing.T) { } // Ensure cluster can compare its Nodes and Members -func TestCluster_Health(t *testing.T) { +func TestCluster_NodeStates(t *testing.T) { c := pilosa.Cluster{ Nodes: []*pilosa.Node{ {Host: "serverA:1000"}, @@ -109,12 +109,12 @@ func TestCluster_Health(t *testing.T) { } // Verify a DOWN node is reported, and extraneous nodes are ignored - if a := c.Health(); !reflect.DeepEqual(a, map[string]string{ - "serverA:1000": pilosa.HealthStatusUp, - "serverB:1000": pilosa.HealthStatusDown, - "serverC:1000": pilosa.HealthStatusUp, + if a := c.NodeStates(); !reflect.DeepEqual(a, map[string]string{ + "serverA:1000": pilosa.NodeStateUp, + "serverB:1000": pilosa.NodeStateDown, + "serverC:1000": pilosa.NodeStateUp, }) { - t.Fatalf("unexpected health: %s", spew.Sdump(a)) + t.Fatalf("unexpected node state: %s", spew.Sdump(a)) } } diff --git a/cmd/backup.go b/cmd/backup.go index 24904cbb5..d25b968f4 100644 --- a/cmd/backup.go +++ b/cmd/backup.go @@ -29,7 +29,7 @@ Backs up the view from across the cluster into a single file. 
} flags := backupCmd.Flags() flags.StringVarP(&Backuper.Host, "host", "", "localhost:10101", "host:port of Pilosa.") - flags.StringVarP(&Backuper.Database, "database", "d", "", "Pilosa database to backup into.") + flags.StringVarP(&Backuper.Index, "index", "i", "", "Pilosa index to backup into.") flags.StringVarP(&Backuper.Frame, "frame", "f", "", "Frame to backup into.") flags.StringVarP(&Backuper.View, "view", "v", "", "View to backup into.") flags.StringVarP(&Backuper.Path, "output-file", "o", "", "File to write backup to - default stdout") diff --git a/cmd/backup_test.go b/cmd/backup_test.go index cacd6510b..4e8f8fe43 100644 --- a/cmd/backup_test.go +++ b/cmd/backup_test.go @@ -22,13 +22,13 @@ func TestBackupConfig(t *testing.T) { args: []string{"backup", "--output-file", "/somefile"}, env: map[string]string{"PILOSA_HOST": "localhost:12345"}, cfgFileContent: ` -database = "mydb" +index = "myindex" frame = "f1" `, validation: func() error { v := validator{} v.Check(cmd.Backuper.Host, "localhost:12345") - v.Check(cmd.Backuper.Database, "mydb") + v.Check(cmd.Backuper.Index, "myindex") v.Check(cmd.Backuper.Frame, "f1") v.Check(cmd.Backuper.Path, "/somefile") return v.Error() diff --git a/cmd/bench.go b/cmd/bench.go index ec7be3760..2dd0d86fc 100644 --- a/cmd/bench.go +++ b/cmd/bench.go @@ -18,7 +18,7 @@ func NewBenchCommand(stdin io.Reader, stdout, stderr io.Writer) *cobra.Command { Use: "bench", Short: "Benchmark operations.", Long: ` -Executes a benchmark for a given operation against the database. +Executes a benchmark for a given operation against the index. `, RunE: func(cmd *cobra.Command, args []string) error { if err := Bencher.Run(context.Background()); err != nil { @@ -29,7 +29,7 @@ Executes a benchmark for a given operation against the database. 
} flags := benchCmd.Flags() flags.StringVarP(&Bencher.Host, "host", "", "localhost:10101", "host:port of Pilosa.") - flags.StringVarP(&Bencher.Database, "database", "d", "", "Pilosa database to benchmark.") + flags.StringVarP(&Bencher.Index, "index", "i", "", "Pilosa index to benchmark.") flags.StringVarP(&Bencher.Frame, "frame", "f", "", "Frame to benchmark.") flags.StringVarP(&Bencher.Op, "operation", "o", "set-bit", "Operation to perform: choose from [set-bit]") flags.IntVarP(&Bencher.N, "num", "n", 0, "Number of operations to perform.") diff --git a/cmd/bench_test.go b/cmd/bench_test.go index 2b5e2c872..db47829ea 100644 --- a/cmd/bench_test.go +++ b/cmd/bench_test.go @@ -22,13 +22,13 @@ func TestBenchConfig(t *testing.T) { args: []string{"bench", "--operation", "set-bit"}, env: map[string]string{"PILOSA_HOST": "localhost:12345"}, cfgFileContent: ` -database = "mydb" +index = "myindex" frame = "f1" `, validation: func() error { v := validator{} v.Check(cmd.Bencher.Host, "localhost:12345") - v.Check(cmd.Bencher.Database, "mydb") + v.Check(cmd.Bencher.Index, "myindex") v.Check(cmd.Bencher.Frame, "f1") v.Check(cmd.Bencher.Op, "set-bit") v.Check(cmd.Bencher.N, 0) diff --git a/cmd/export.go b/cmd/export.go index fb22ee369..186759909 100644 --- a/cmd/export.go +++ b/cmd/export.go @@ -23,7 +23,7 @@ the output is written to STDOUT. The format of the CSV file is: - BITMAPID,PROFILEID + ROWID,COLUMNID The file does not contain any headers. `, @@ -37,7 +37,7 @@ The file does not contain any headers. 
flags := exportCmd.Flags() flags.StringVarP(&Exporter.Host, "host", "", "localhost:10101", "host:port of Pilosa.") - flags.StringVarP(&Exporter.Database, "database", "d", "", "Pilosa database to export into.") + flags.StringVarP(&Exporter.Index, "index", "i", "", "Pilosa index to export into.") flags.StringVarP(&Exporter.Frame, "frame", "f", "", "Frame to export into.") flags.StringVarP(&Exporter.Path, "output-file", "o", "", "File to write export to - default stdout") diff --git a/cmd/export_test.go b/cmd/export_test.go index d546a839a..45ddc58c6 100644 --- a/cmd/export_test.go +++ b/cmd/export_test.go @@ -22,13 +22,13 @@ func TestExportConfig(t *testing.T) { args: []string{"export", "--output-file", "/somefile"}, env: map[string]string{"PILOSA_HOST": "localhost:12345"}, cfgFileContent: ` -database = "mydb" +index = "myindex" frame = "f1" `, validation: func() error { v := validator{} v.Check(cmd.Exporter.Host, "localhost:12345") - v.Check(cmd.Exporter.Database, "mydb") + v.Check(cmd.Exporter.Index, "myindex") v.Check(cmd.Exporter.Frame, "f1") v.Check(cmd.Exporter.Path, "/somefile") return v.Error() diff --git a/cmd/import.go b/cmd/import.go index b7998f750..43b26e7f8 100644 --- a/cmd/import.go +++ b/cmd/import.go @@ -16,12 +16,12 @@ func NewImportCommand(stdin io.Reader, stdout, stderr io.Writer) *cobra.Command importCmd := &cobra.Command{ Use: "import", Short: "Bulk load data into pilosa.", - Long: `Bulk imports one or more CSV files to a host's database and frame. The bits + Long: `Bulk imports one or more CSV files to a host's index and frame. The bits of the CSV file are grouped by slice for the most efficient import. The format of the CSV file is: - BITMAPID,PROFILEID,[TIME] + ROWID,COLUMNID,[TIME] The file should contain no headers. The TIME column is optional and can be omitted. If it is present then its format should be YYYY-MM-DDTHH:MM. @@ -36,7 +36,7 @@ omitted. If it is present then its format should be YYYY-MM-DDTHH:MM. 
} flags := importCmd.Flags() flags.StringVarP(&Importer.Host, "host", "", "localhost:10101", "host:port of Pilosa.") - flags.StringVarP(&Importer.Database, "database", "d", "", "Pilosa database to import into.") + flags.StringVarP(&Importer.Index, "index", "i", "", "Pilosa index to import into.") flags.StringVarP(&Importer.Frame, "frame", "f", "", "Frame to import into.") flags.IntVarP(&Importer.BufferSize, "buffer-size", "s", 10000000, "Number of bits to buffer/sort before importing.") diff --git a/cmd/import_test.go b/cmd/import_test.go index ac98539a4..0251d1894 100644 --- a/cmd/import_test.go +++ b/cmd/import_test.go @@ -22,13 +22,13 @@ func TestImportConfig(t *testing.T) { args: []string{"import"}, env: map[string]string{"PILOSA_HOST": "localhost:12345"}, cfgFileContent: ` -database = "mydb" +index = "myindex" frame = "f1" `, validation: func() error { v := validator{} v.Check(cmd.Importer.Host, "localhost:12345") - v.Check(cmd.Importer.Database, "mydb") + v.Check(cmd.Importer.Index, "myindex") v.Check(cmd.Importer.Frame, "f1") return v.Error() }, diff --git a/cmd/restore.go b/cmd/restore.go index 5179cbc0d..38a1a3145 100644 --- a/cmd/restore.go +++ b/cmd/restore.go @@ -30,10 +30,10 @@ Restores a view to the cluster from a backup file. 
} flags := restoreCmd.Flags() flags.StringVarP(&Restorer.Host, "host", "", "localhost:10101", "host:port of Pilosa.") - flags.StringVarP(&Restorer.Database, "database", "d", "", "Pilosa database to restore into.") + flags.StringVarP(&Restorer.Index, "index", "i", "", "Pilosa index to restore into.") flags.StringVarP(&Restorer.Frame, "frame", "f", "", "Frame to restore into.") flags.StringVarP(&Restorer.View, "view", "v", "", "View to restore into.") - flags.StringVarP(&Restorer.Path, "input-file", "i", "", "File to restore from.") + flags.StringVarP(&Restorer.Path, "input-file", "d", "", "File to restore data from.") return restoreCmd } diff --git a/cmd/restore_test.go b/cmd/restore_test.go index ef8a90d32..6f8b5c15b 100644 --- a/cmd/restore_test.go +++ b/cmd/restore_test.go @@ -22,13 +22,13 @@ func TestRestoreConfig(t *testing.T) { args: []string{"restore", "--input-file", "/somefile"}, env: map[string]string{"PILOSA_HOST": "localhost:12345"}, cfgFileContent: ` -database = "mydb" +index = "myindex" frame = "f1" `, validation: func() error { v := validator{} v.Check(cmd.Restorer.Host, "localhost:12345") - v.Check(cmd.Restorer.Database, "mydb") + v.Check(cmd.Restorer.Index, "myindex") v.Check(cmd.Restorer.Frame, "f1") v.Check(cmd.Restorer.Path, "/somefile") return v.Error() diff --git a/cmd/sort.go b/cmd/sort.go index 0ea7d9007..668206070 100644 --- a/cmd/sort.go +++ b/cmd/sort.go @@ -25,7 +25,7 @@ Sorts the import data at PATH into the optimal sort order for importing. The format of the CSV file is: - BITMAPID,PROFILEID + ROWID,COLUMNID The file should contain no headers. `, diff --git a/ctl/backup.go b/ctl/backup.go index 6eeed5d4c..369455e7c 100644 --- a/ctl/backup.go +++ b/ctl/backup.go @@ -14,10 +14,10 @@ type BackupCommand struct { // Destination host and port. Host string - // Name of the database, frame, view to backup. - Database string - Frame string - View string + // Name of the index, frame, view to backup. 
+ Index string + Frame string + View string // Output file to write to. Path string @@ -54,7 +54,7 @@ func (cmd *BackupCommand) Run(ctx context.Context) error { defer f.Close() // Begin streaming backup. - if err := client.BackupTo(ctx, f, cmd.Database, cmd.Frame, cmd.View); err != nil { + if err := client.BackupTo(ctx, f, cmd.Index, cmd.Frame, cmd.View); err != nil { return err } diff --git a/ctl/bench.go b/ctl/bench.go index 2216bace0..b63d9165f 100644 --- a/ctl/bench.go +++ b/ctl/bench.go @@ -11,14 +11,14 @@ import ( "github.com/pilosa/pilosa" ) -// BenchCommand represents a command for benchmarking database operations. +// BenchCommand represents a command for benchmarking index operations. type BenchCommand struct { // Destination host and port. Host string - // Name of the database & frame to execute against. - Database string - Frame string + // Name of the index & frame to execute against. + Index string + Frame string // Type of operation and number to execute. Op string @@ -57,25 +57,25 @@ func (cmd *BenchCommand) Run(ctx context.Context) error { func (cmd *BenchCommand) runSetBit(ctx context.Context, client *pilosa.Client) error { if cmd.N == 0 { return errors.New("operation count required") - } else if cmd.Database == "" { - return pilosa.ErrDatabaseRequired + } else if cmd.Index == "" { + return pilosa.ErrIndexRequired } else if cmd.Frame == "" { return pilosa.ErrFrameRequired } - const maxBitmapID = 1000 - const maxProfileID = 100000 + const maxRowID = 1000 + const maxColumnID = 100000 startTime := time.Now() // Execute operation continuously. 
for i := 0; i < cmd.N; i++ { - bitmapID := rand.Intn(maxBitmapID) - profileID := rand.Intn(maxProfileID) + rowID := rand.Intn(maxRowID) + columnID := rand.Intn(maxColumnID) - q := fmt.Sprintf(`SetBit(id=%d, frame="%s", profileID=%d)`, bitmapID, cmd.Frame, profileID) + q := fmt.Sprintf(`SetBit(id=%d, frame="%s", columnID=%d)`, rowID, cmd.Frame, columnID) - if _, err := client.ExecuteQuery(ctx, cmd.Database, q, true); err != nil { + if _, err := client.ExecuteQuery(ctx, cmd.Index, q, true); err != nil { return err } } diff --git a/ctl/export.go b/ctl/export.go index f4b0efd0a..6bbf1ac0e 100644 --- a/ctl/export.go +++ b/ctl/export.go @@ -14,9 +14,9 @@ type ExportCommand struct { // Remote host and port. Host string - // Name of the database & frame to export from. - Database string - Frame string + // Name of the index & frame to export from. + Index string + Frame string // Filename to export to. Path string @@ -37,8 +37,8 @@ func (cmd *ExportCommand) Run(ctx context.Context) error { logger := log.New(cmd.Stderr, "", log.LstdFlags) // Validate arguments. - if cmd.Database == "" { - return pilosa.ErrDatabaseRequired + if cmd.Index == "" { + return pilosa.ErrIndexRequired } else if cmd.Frame == "" { return pilosa.ErrFrameRequired } @@ -63,15 +63,15 @@ func (cmd *ExportCommand) Run(ctx context.Context) error { } // Determine slice count. - maxSlices, err := client.MaxSliceByDatabase(ctx) + maxSlices, err := client.MaxSliceByIndex(ctx) if err != nil { return err } // Export each slice. 
- for slice := uint64(0); slice <= maxSlices[cmd.Database]; slice++ { + for slice := uint64(0); slice <= maxSlices[cmd.Index]; slice++ { logger.Printf("exporting slice: %d", slice) - if err := client.ExportCSV(ctx, cmd.Database, cmd.Frame, slice, w); err != nil { + if err := client.ExportCSV(ctx, cmd.Index, cmd.Frame, slice, w); err != nil { return err } } diff --git a/ctl/import.go b/ctl/import.go index df8fb00eb..cefe2e45b 100644 --- a/ctl/import.go +++ b/ctl/import.go @@ -19,9 +19,9 @@ type ImportCommand struct { // Destination host and port. Host string `json:"host"` - // Name of the database & frame to import into. - Database string `json:"db"` - Frame string `json:"frame"` + // Name of the index & frame to import into. + Index string `json:"index"` + Frame string `json:"frame"` // Filenames to import from. Paths []string `json:"paths"` @@ -54,9 +54,9 @@ func (cmd *ImportCommand) Run(ctx context.Context) error { logger := log.New(cmd.Stderr, "", log.LstdFlags) // Validate arguments. - // Database and frame are validated early before the files are parsed. - if cmd.Database == "" { - return pilosa.ErrDatabaseRequired + // Index and frame are validated early before the files are parsed. + if cmd.Index == "" { + return pilosa.ErrIndexRequired } else if cmd.Frame == "" { return pilosa.ErrFrameRequired } else if len(cmd.Paths) == 0 { @@ -123,19 +123,19 @@ func (cmd *ImportCommand) importPath(ctx context.Context, path string) error { var bit pilosa.Bit - // Parse bitmap id. - bitmapID, err := strconv.ParseUint(record[0], 10, 64) + // Parse row id. + rowID, err := strconv.ParseUint(record[0], 10, 64) if err != nil { - return fmt.Errorf("invalid bitmap id on row %d: %q", rnum, record[0]) + return fmt.Errorf("invalid row id on row %d: %q", rnum, record[0]) } - bit.BitmapID = bitmapID + bit.RowID = rowID - // Parse bitmap id. - profileID, err := strconv.ParseUint(record[1], 10, 64) + // Parse column id. 
+ columnID, err := strconv.ParseUint(record[1], 10, 64) if err != nil { - return fmt.Errorf("invalid profile id on row %d: %q", rnum, record[1]) + return fmt.Errorf("invalid column id on row %d: %q", rnum, record[1]) } - bit.ProfileID = profileID + bit.ColumnID = columnID // Parse time, if exists. if len(record) > 2 && record[2] != "" { @@ -176,7 +176,7 @@ func (cmd *ImportCommand) importBits(ctx context.Context, bits []pilosa.Bit) err // Parse path into bits. for slice, bits := range bitsBySlice { logger.Printf("importing slice: %d, n=%d", slice, len(bits)) - if err := cmd.Client.Import(ctx, cmd.Database, cmd.Frame, slice, bits); err != nil { + if err := cmd.Client.Import(ctx, cmd.Index, cmd.Frame, slice, bits); err != nil { return err } } diff --git a/ctl/restore.go b/ctl/restore.go index 56dfb3080..ecfcf5f5a 100644 --- a/ctl/restore.go +++ b/ctl/restore.go @@ -14,10 +14,10 @@ type RestoreCommand struct { // Destination host and port. Host string - // Name of the database & frame to backup. - Database string - Frame string - View string + // Name of the index & frame to backup. + Index string + Frame string + View string // Import file to read from. Path string @@ -54,7 +54,7 @@ func (cmd *RestoreCommand) Run(ctx context.Context) error { defer f.Close() // Restore backup file to the cluster. 
- if err := client.RestoreFrom(ctx, f, cmd.Database, cmd.Frame, cmd.View); err != nil { + if err := client.RestoreFrom(ctx, f, cmd.Index, cmd.Frame, cmd.View); err != nil { return err } diff --git a/ctl/sort.go b/ctl/sort.go index 9d4b14277..5590d2538 100644 --- a/ctl/sort.go +++ b/ctl/sort.go @@ -45,7 +45,7 @@ func (cmd *SortCommand) Run(ctx context.Context) error { r.FieldsPerRecord = -1 a := make([]pilosa.Bit, 0, 1000000) for { - bitmapID, profileID, timestamp, err := readCSVRow(r) + rowID, columnID, timestamp, err := readCSVRow(r) if err == io.EOF { break } else if err == errBlank { @@ -53,7 +53,7 @@ func (cmd *SortCommand) Run(ctx context.Context) error { } else if err != nil { return err } - a = append(a, pilosa.Bit{BitmapID: bitmapID, ProfileID: profileID, Timestamp: timestamp}) + a = append(a, pilosa.Bit{RowID: rowID, ColumnID: columnID, Timestamp: timestamp}) } // Sort bits by position. @@ -65,10 +65,10 @@ func (cmd *SortCommand) Run(ctx context.Context) error { for _, bit := range a { // Write CSV to buffer. buf = buf[:0] - buf = strconv.AppendUint(buf, bit.BitmapID, 10) + buf = strconv.AppendUint(buf, bit.RowID, 10) buf = append(buf, ',') - buf = strconv.AppendUint(buf, bit.ProfileID, 10) + buf = strconv.AppendUint(buf, bit.ColumnID, 10) if bit.Timestamp != 0 { buf = append(buf, ',') @@ -91,8 +91,8 @@ func (cmd *SortCommand) Run(ctx context.Context) error { return nil } -// readCSVRow reads a bitmap/profile pair from a CSV row. -func readCSVRow(r *csv.Reader) (bitmapID, profileID uint64, timestamp int64, err error) { +// readCSVRow reads a row/column pair from a CSV row. +func readCSVRow(r *csv.Reader) (rowID, columnID uint64, timestamp int64, err error) { // Read CSV row. record, err := r.Read() if err != nil { @@ -106,16 +106,16 @@ func readCSVRow(r *csv.Reader) (bitmapID, profileID uint64, timestamp int64, err return 0, 0, 0, fmt.Errorf("bad column count: %d", len(record)) } - // Parse bitmap id. 
- bitmapID, err = strconv.ParseUint(record[0], 10, 64) + // Parse row id. + rowID, err = strconv.ParseUint(record[0], 10, 64) if err != nil { - return 0, 0, 0, fmt.Errorf("invalid bitmap id: %q", record[0]) + return 0, 0, 0, fmt.Errorf("invalid row id: %q", record[0]) } - // Parse bitmap id. - profileID, err = strconv.ParseUint(record[1], 10, 64) + // Parse column id. + columnID, err = strconv.ParseUint(record[1], 10, 64) if err != nil { - return 0, 0, 0, fmt.Errorf("invalid profile id: %q", record[1]) + return 0, 0, 0, fmt.Errorf("invalid column id: %q", record[1]) } // Parse timestamp, if available. @@ -127,7 +127,7 @@ func readCSVRow(r *csv.Reader) (bitmapID, profileID uint64, timestamp int64, err timestamp = t.UnixNano() } - return bitmapID, profileID, timestamp, nil + return rowID, columnID, timestamp, nil } // errBlank indicates a blank row in a CSV file. diff --git a/db.go b/db.go deleted file mode 100644 index 3cfbf23e1..000000000 --- a/db.go +++ /dev/null @@ -1,565 +0,0 @@ -package pilosa - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "sort" - "sync" - "time" - - "github.com/gogo/protobuf/proto" - "github.com/pilosa/pilosa/internal" -) - -// Default database settings. -const ( - DefaultColumnLabel = "profileID" -) - -// DB represents a container for frames. -type DB struct { - mu sync.Mutex - path string - name string - - // Default time quantum for all frames in database. - // This can be overridden by individual frames. - timeQuantum TimeQuantum - - // Label used for referring to columns in database. - columnLabel string - - // Frames by name. - frames map[string]*Frame - - // Max Slice on any node in the cluster, according to this node - remoteMaxSlice uint64 - remoteMaxInverseSlice uint64 - - // Profile attribute storage and cache - profileAttrStore *AttrStore - - broadcaster Broadcaster - Stats StatsClient - - LogOutput io.Writer -} - -// NewDB returns a new instance of DB. 
-func NewDB(path, name string) (*DB, error) { - err := ValidateName(name) - if err != nil { - return nil, err - } - - return &DB{ - path: path, - name: name, - frames: make(map[string]*Frame), - - remoteMaxSlice: 0, - remoteMaxInverseSlice: 0, - - profileAttrStore: NewAttrStore(filepath.Join(path, ".data")), - - columnLabel: DefaultColumnLabel, - - Stats: NopStatsClient, - LogOutput: ioutil.Discard, - }, nil -} - -// Name returns name of the database. -func (db *DB) Name() string { return db.name } - -// Path returns the path the database was initialized with. -func (db *DB) Path() string { return db.path } - -// ProfileAttrStore returns the storage for profile attributes. -func (db *DB) ProfileAttrStore() *AttrStore { return db.profileAttrStore } - -// SetColumnLabel sets the column label. Persists to meta file on update. -func (db *DB) SetColumnLabel(v string) error { - db.mu.Lock() - defer db.mu.Unlock() - - // Ignore if no change occurred. - if v == "" || db.columnLabel == v { - return nil - } - - // Make sure columnLabel is valid name - err := ValidateName(v) - if err != nil { - return err - } - - // Persist meta data to disk on change. - db.columnLabel = v - if err := db.saveMeta(); err != nil { - return err - } - - return nil -} - -// ColumnLabel returns the column label. -func (db *DB) ColumnLabel() string { - db.mu.Lock() - v := db.columnLabel - db.mu.Unlock() - return v -} - -// Open opens and initializes the database. -func (db *DB) Open() error { - // Ensure the path exists. - if err := os.MkdirAll(db.path, 0777); err != nil { - return err - } - - // Read meta file. - if err := db.loadMeta(); err != nil { - return err - } - - if err := db.openFrames(); err != nil { - return err - } - - if err := db.profileAttrStore.Open(); err != nil { - return err - } - - return nil -} - -// openFrames opens and initializes the frames inside the database. 
-func (db *DB) openFrames() error { - f, err := os.Open(db.path) - if err != nil { - return err - } - defer f.Close() - - fis, err := f.Readdir(0) - if err != nil { - return err - } - - for _, fi := range fis { - if !fi.IsDir() { - continue - } - - fr, err := db.newFrame(db.FramePath(filepath.Base(fi.Name())), filepath.Base(fi.Name())) - if err != nil { - return ErrName - } - if err := fr.Open(); err != nil { - return fmt.Errorf("open frame: name=%s, err=%s", fr.Name(), err) - } - db.frames[fr.Name()] = fr - - db.Stats.Count("frameN", 1) - } - return nil -} - -// loadMeta reads meta data for the database, if any. -func (db *DB) loadMeta() error { - var pb internal.DBMeta - - // Read data from meta file. - buf, err := ioutil.ReadFile(filepath.Join(db.path, ".meta")) - if os.IsNotExist(err) { - db.timeQuantum = "" - db.columnLabel = DefaultColumnLabel - return nil - } else if err != nil { - return err - } else { - if err := proto.Unmarshal(buf, &pb); err != nil { - return err - } - } - - // Copy metadata fields. - db.timeQuantum = TimeQuantum(pb.TimeQuantum) - db.columnLabel = pb.ColumnLabel - - return nil -} - -// saveMeta writes meta data for the database. -func (db *DB) saveMeta() error { - // Marshal metadata. - buf, err := proto.Marshal(&internal.DBMeta{ - TimeQuantum: string(db.timeQuantum), - ColumnLabel: db.columnLabel, - }) - if err != nil { - return err - } - - // Write to meta file. - if err := ioutil.WriteFile(filepath.Join(db.path, ".meta"), buf, 0666); err != nil { - return err - } - - return nil -} - -// Close closes the database and its frames. -func (db *DB) Close() error { - db.mu.Lock() - defer db.mu.Unlock() - - // Close the attribute store. - if db.profileAttrStore != nil { - db.profileAttrStore.Close() - } - - // Close all frames. - for _, f := range db.frames { - f.Close() - } - db.frames = make(map[string]*Frame) - - return nil -} - -// MaxSlice returns the max slice in the database according to this node. 
-func (db *DB) MaxSlice() uint64 { - if db == nil { - return 0 - } - db.mu.Lock() - defer db.mu.Unlock() - - max := db.remoteMaxSlice - for _, f := range db.frames { - if slice := f.MaxSlice(); slice > max { - max = slice - } - } - return max -} - -func (db *DB) SetRemoteMaxSlice(newmax uint64) { - db.mu.Lock() - defer db.mu.Unlock() - db.remoteMaxSlice = newmax -} - -// MaxInverseSlice returns the max inverse slice in the database according to this node. -func (db *DB) MaxInverseSlice() uint64 { - if db == nil { - return 0 - } - db.mu.Lock() - defer db.mu.Unlock() - - max := db.remoteMaxInverseSlice - for _, f := range db.frames { - if slice := f.MaxInverseSlice(); slice > max { - max = slice - } - } - return max -} - -func (db *DB) SetRemoteMaxInverseSlice(v uint64) { - db.mu.Lock() - defer db.mu.Unlock() - db.remoteMaxInverseSlice = v -} - -// TimeQuantum returns the default time quantum for the database. -func (db *DB) TimeQuantum() TimeQuantum { - db.mu.Lock() - defer db.mu.Unlock() - return db.timeQuantum -} - -// SetTimeQuantum sets the default time quantum for the database. -func (db *DB) SetTimeQuantum(q TimeQuantum) error { - db.mu.Lock() - defer db.mu.Unlock() - - // Validate input. - if !q.Valid() { - return ErrInvalidTimeQuantum - } - - // Update value on database. - db.timeQuantum = q - - // Perist meta data to disk. - if err := db.saveMeta(); err != nil { - return err - } - - return nil -} - -// FramePath returns the path to a frame in the database. -func (db *DB) FramePath(name string) string { return filepath.Join(db.path, name) } - -// Frame returns a frame in the database by name. -func (db *DB) Frame(name string) *Frame { - db.mu.Lock() - defer db.mu.Unlock() - return db.frame(name) -} - -func (db *DB) frame(name string) *Frame { return db.frames[name] } - -// Frames returns a list of all frames in the database. 
-func (db *DB) Frames() []*Frame { - db.mu.Lock() - defer db.mu.Unlock() - - a := make([]*Frame, 0, len(db.frames)) - for _, f := range db.frames { - a = append(a, f) - } - sort.Sort(frameSlice(a)) - - return a -} - -// CreateFrame creates a frame. -func (db *DB) CreateFrame(name string, opt FrameOptions) (*Frame, error) { - db.mu.Lock() - defer db.mu.Unlock() - - // Ensure frame doesn't already exist. - if db.frames[name] != nil { - return nil, ErrFrameExists - } - return db.createFrame(name, opt) -} - -// CreateFrameIfNotExists creates a frame with the given options if it doesn't exist. -func (db *DB) CreateFrameIfNotExists(name string, opt FrameOptions) (*Frame, error) { - db.mu.Lock() - defer db.mu.Unlock() - - // Find frame in cache first. - if f := db.frames[name]; f != nil { - return f, nil - } - - return db.createFrame(name, opt) -} - -func (db *DB) createFrame(name string, opt FrameOptions) (*Frame, error) { - if name == "" { - return nil, errors.New("frame name required") - } else if opt.CacheType != "" && !IsValidCacheType(opt.CacheType) { - return nil, ErrInvalidCacheType - } - - // Initialize frame. - f, err := db.newFrame(db.FramePath(name), name) - if err != nil { - return nil, err - } - - // Open frame. - if err := f.Open(); err != nil { - return nil, err - } - - // Default the time quantum to what is set on the DB. - if err := f.SetTimeQuantum(db.timeQuantum); err != nil { - f.Close() - return nil, err - } - - // Set cache type. - if opt.CacheType == "" { - opt.CacheType = DefaultCacheType - } - f.cacheType = opt.CacheType - - // Set options. - if opt.RowLabel != "" { - f.rowLabel = opt.RowLabel - } - if opt.CacheSize != 0 { - f.cacheSize = opt.CacheSize - } - - f.inverseEnabled = opt.InverseEnabled - if err := f.saveMeta(); err != nil { - f.Close() - return nil, err - } - - // Add to database's frame lookup. 
- db.frames[name] = f - - db.Stats.Count("frameN", 1) - - return f, nil -} - -func (db *DB) newFrame(path, name string) (*Frame, error) { - f, err := NewFrame(path, db.name, name) - if err != nil { - return nil, err - } - f.LogOutput = db.LogOutput - f.Stats = db.Stats.WithTags(fmt.Sprintf("frame:%s", name)) - f.broadcaster = db.broadcaster - return f, nil -} - -// DeleteFrame removes a frame from the database. -func (db *DB) DeleteFrame(name string) error { - db.mu.Lock() - defer db.mu.Unlock() - - // Ignore if frame doesn't exist. - f := db.frame(name) - if f == nil { - return nil - } - - // Close frame. - if err := f.Close(); err != nil { - return err - } - - // Delete frame directory. - if err := os.RemoveAll(db.FramePath(name)); err != nil { - return err - } - - // Remove reference. - delete(db.frames, name) - - db.Stats.Count("frameN", -1) - - return nil -} - -type dbSlice []*DB - -func (p dbSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p dbSlice) Len() int { return len(p) } -func (p dbSlice) Less(i, j int) bool { return p[i].Name() < p[j].Name() } - -// DBInfo represents schema information for a database. -type DBInfo struct { - Name string `json:"name"` - Frames []*FrameInfo `json:"frames"` -} - -type dbInfoSlice []*DBInfo - -func (p dbInfoSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p dbInfoSlice) Len() int { return len(p) } -func (p dbInfoSlice) Less(i, j int) bool { return p[i].Name < p[j].Name } - -// MergeSchemas combines databases and frames from a and b into one schema. -func MergeSchemas(a, b []*DBInfo) []*DBInfo { - // Generate a map from both schemas. 
- m := make(map[string]map[string]map[string]struct{}) - for _, dbs := range [][]*DBInfo{a, b} { - for _, db := range dbs { - if m[db.Name] == nil { - m[db.Name] = make(map[string]map[string]struct{}) - } - for _, frame := range db.Frames { - if m[db.Name][frame.Name] == nil { - m[db.Name][frame.Name] = make(map[string]struct{}) - } - for _, view := range frame.Views { - m[db.Name][frame.Name][view.Name] = struct{}{} - } - } - } - } - - // Generate new schema from map. - dbs := make([]*DBInfo, 0, len(m)) - for db, frames := range m { - di := &DBInfo{Name: db} - for frame, views := range frames { - fi := &FrameInfo{Name: frame} - for view := range views { - fi.Views = append(fi.Views, &ViewInfo{Name: view}) - } - sort.Sort(viewInfoSlice(fi.Views)) - di.Frames = append(di.Frames, fi) - } - sort.Sort(frameInfoSlice(di.Frames)) - dbs = append(dbs, di) - } - sort.Sort(dbInfoSlice(dbs)) - - return dbs -} - -// encodeDBs converts a into its internal representation. -func encodeDBs(a []*DB) []*internal.DB { - other := make([]*internal.DB, len(a)) - for i := range a { - other[i] = encodeDB(a[i]) - } - return other -} - -// encodeDB converts d into its internal representation. -func encodeDB(d *DB) *internal.DB { - return &internal.DB{ - Name: d.name, - Meta: &internal.DBMeta{ - ColumnLabel: d.columnLabel, - TimeQuantum: string(d.timeQuantum), - }, - MaxSlice: d.remoteMaxSlice, - Frames: encodeFrames(d.Frames()), - } -} - -// DBOptions represents options to set when initializing a db. -type DBOptions struct { - ColumnLabel string `json:"columnLabel,omitempty"` - TimeQuantum TimeQuantum `json:"timeQuantum,omitempty"` -} - -// Encode converts o into its internal representation. -func (o *DBOptions) Encode() *internal.DBMeta { - return &internal.DBMeta{ - ColumnLabel: o.ColumnLabel, - TimeQuantum: string(o.TimeQuantum), - } -} - -// hasTime returns true if a contains a non-nil time. 
-func hasTime(a []*time.Time) bool { - for _, t := range a { - if t != nil { - return true - } - } - return false -} - -type importKey struct { - View string - Slice uint64 -} - -type importData struct { - BitmapIDs []uint64 - ProfileIDs []uint64 -} diff --git a/db_test.go b/db_test.go deleted file mode 100644 index f7cfc906a..000000000 --- a/db_test.go +++ /dev/null @@ -1,179 +0,0 @@ -package pilosa_test - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/pilosa/pilosa" -) - -// Ensure database can open and retrieve a frame. -func TestDB_CreateFrameIfNotExists(t *testing.T) { - db := MustOpenDB() - defer db.Close() - - // Create frame. - f, err := db.CreateFrameIfNotExists("f", pilosa.FrameOptions{}) - if err != nil { - t.Fatal(err) - } else if f == nil { - t.Fatal("expected frame") - } - - // Retrieve existing frame. - other, err := db.CreateFrameIfNotExists("f", pilosa.FrameOptions{}) - if err != nil { - t.Fatal(err) - } else if f.Frame != other.Frame { - t.Fatal("frame mismatch") - } - - if f.Frame != db.Frame("f") { - t.Fatal("frame mismatch") - } -} - -// Ensure database defaults the time quantum on new frames. -func TestDB_CreateFrame_TimeQuantum(t *testing.T) { - db := MustOpenDB() - defer db.Close() - - // Set database time quantum. - if err := db.SetTimeQuantum(pilosa.TimeQuantum("YM")); err != nil { - t.Fatal(err) - } - - // Create frame. - f, err := db.CreateFrame("f", pilosa.FrameOptions{}) - if err != nil { - t.Fatal(err) - } else if q := f.TimeQuantum(); q != pilosa.TimeQuantum("YM") { - t.Fatalf("unexpected frame time quantum: %s", q) - } -} - -// Ensure database can delete a frame. -func TestDB_DeleteFrame(t *testing.T) { - db := MustOpenDB() - defer db.Close() - - // Create frame. - if _, err := db.CreateFrameIfNotExists("f", pilosa.FrameOptions{}); err != nil { - t.Fatal(err) - } - - // Delete frame & verify it's gone. 
- if err := db.DeleteFrame("f"); err != nil { - t.Fatal(err) - } else if db.Frame("f") != nil { - t.Fatal("expected nil frame") - } - - // Delete again to make sure it doesn't error. - if err := db.DeleteFrame("f"); err != nil { - t.Fatal(err) - } -} - -// Ensure database can set the default time quantum. -func TestDB_SetTimeQuantum(t *testing.T) { - db := MustOpenDB() - defer db.Close() - - // Set & retrieve time quantum. - if err := db.SetTimeQuantum(pilosa.TimeQuantum("YMDH")); err != nil { - t.Fatal(err) - } else if q := db.TimeQuantum(); q != pilosa.TimeQuantum("YMDH") { - t.Fatalf("unexpected quantum: %s", q) - } - - // Reload database and verify that it is persisted. - if err := db.Reopen(); err != nil { - t.Fatal(err) - } else if q := db.TimeQuantum(); q != pilosa.TimeQuantum("YMDH") { - t.Fatalf("unexpected quantum (reopen): %s", q) - } -} - -// DB represents a test wrapper for pilosa.DB. -type DB struct { - *pilosa.DB -} - -// NewDB returns a new instance of DB d. -func NewDB() *DB { - path, err := ioutil.TempDir("", "pilosa-db-") - if err != nil { - panic(err) - } - db, err := pilosa.NewDB(path, "d") - if err != nil { - panic(err) - } - return &DB{DB: db} -} - -// MustOpenDB returns a new, opened database at a temporary path. Panic on error. -func MustOpenDB() *DB { - db := NewDB() - if err := db.Open(); err != nil { - panic(err) - } - return db -} - -// Close closes the database and removes the underlying data. -func (db *DB) Close() error { - defer os.RemoveAll(db.Path()) - return db.DB.Close() -} - -// Reopen closes the database and reopens it. -func (db *DB) Reopen() error { - var err error - if err := db.DB.Close(); err != nil { - return err - } - - path, name := db.Path(), db.Name() - db.DB, err = pilosa.NewDB(path, name) - if err != nil { - return err - } - - if err := db.Open(); err != nil { - return err - } - return nil -} - -// CreateFrame creates a frame with the given options. 
-func (db *DB) CreateFrame(name string, opt pilosa.FrameOptions) (*Frame, error) { - f, err := db.DB.CreateFrame(name, opt) - if err != nil { - return nil, err - } - return &Frame{Frame: f}, nil -} - -// CreateFrameIfNotExists creates a frame with the given options if it doesn't exist. -func (db *DB) CreateFrameIfNotExists(name string, opt pilosa.FrameOptions) (*Frame, error) { - f, err := db.DB.CreateFrameIfNotExists(name, opt) - if err != nil { - return nil, err - } - return &Frame{Frame: f}, nil -} - -// Ensure database can delete a frame. -func TestDB_InvalidName(t *testing.T) { - path, err := ioutil.TempDir("", "pilosa-db-") - if err != nil { - panic(err) - } - db, err := pilosa.NewDB(path, "ABC") - if db != nil { - t.Fatalf("unexpected db name %s", db) - } -} diff --git a/executor.go b/executor.go index 52e4c7402..e467385b0 100644 --- a/executor.go +++ b/executor.go @@ -21,13 +21,13 @@ const ( DefaultFrame = "general" // MinThreshold is the lowest count to use in a Top-N operation when - // looking for additional bitmap/count pairs. + // looking for additional id/count pairs. MinThreshold = 1 ) // Executor recursively executes calls in a PQL query across all slices. type Executor struct { - Index *Index + Holder *Holder // Local hostname & cluster configuration. Host string @@ -45,10 +45,10 @@ func NewExecutor() *Executor { } // Execute executes a PQL query. -func (e *Executor) Execute(ctx context.Context, db string, q *pql.Query, slices []uint64, opt *ExecOptions) ([]interface{}, error) { - // Verify that a database is set. - if db == "" { - return nil, ErrDatabaseRequired +func (e *Executor) Execute(ctx context.Context, index string, q *pql.Query, slices []uint64, opt *ExecOptions) ([]interface{}, error) { + // Verify that an index is set. + if index == "" { + return nil, ErrIndexRequired } // Default options. 
@@ -60,7 +60,7 @@ func (e *Executor) Execute(ctx context.Context, db string, q *pql.Query, slices if len(slices) == 0 { if needsSlices(q.Calls) { // Round up the number of slices. - maxSlice := e.Index.DB(db).MaxSlice() + maxSlice := e.Holder.Index(index).MaxSlice() // Generate a slices of all slices. slices = make([]uint64, maxSlice+1) @@ -71,14 +71,14 @@ func (e *Executor) Execute(ctx context.Context, db string, q *pql.Query, slices } // Optimize handling for bulk attribute insertion. - if hasOnlySetBitmapAttrs(q.Calls) { - return e.executeBulkSetBitmapAttrs(ctx, db, q.Calls, opt) + if hasOnlySetRowAttrs(q.Calls) { + return e.executeBulkSetRowAttrs(ctx, index, q.Calls, opt) } // Execute each call serially. results := make([]interface{}, 0, len(q.Calls)) for _, call := range q.Calls { - v, err := e.executeCall(ctx, db, call, slices, opt) + v, err := e.executeCall(ctx, index, call, slices, opt) if err != nil { return nil, err } @@ -88,31 +88,31 @@ func (e *Executor) Execute(ctx context.Context, db string, q *pql.Query, slices } // executeCall executes a call. -func (e *Executor) executeCall(ctx context.Context, db string, c *pql.Call, slices []uint64, opt *ExecOptions) (interface{}, error) { +func (e *Executor) executeCall(ctx context.Context, index string, c *pql.Call, slices []uint64, opt *ExecOptions) (interface{}, error) { if err := e.validateCallArgs(c); err != nil { return nil, err } - dbTag := fmt.Sprintf("db:%s", db) + indexTag := fmt.Sprintf("index:%s", index) // Special handling for mutation and top-n calls. 
switch c.Name { case "ClearBit": - return e.executeClearBit(ctx, db, c, opt) + return e.executeClearBit(ctx, index, c, opt) case "Count": - e.Index.Stats.CountWithCustomTags(c.Name, 1, []string{dbTag}) - return e.executeCount(ctx, db, c, slices, opt) + e.Holder.Stats.CountWithCustomTags(c.Name, 1, []string{indexTag}) + return e.executeCount(ctx, index, c, slices, opt) case "SetBit": - return e.executeSetBit(ctx, db, c, opt) - case "SetBitmapAttrs": - return nil, e.executeSetBitmapAttrs(ctx, db, c, opt) - case "SetProfileAttrs": - return nil, e.executeSetProfileAttrs(ctx, db, c, opt) + return e.executeSetBit(ctx, index, c, opt) + case "SetRowAttrs": + return nil, e.executeSetRowAttrs(ctx, index, c, opt) + case "SetColumnAttrs": + return nil, e.executeSetColumnAttrs(ctx, index, c, opt) case "TopN": - e.Index.Stats.CountWithCustomTags(c.Name, 1, []string{dbTag}) - return e.executeTopN(ctx, db, c, slices, opt) + e.Holder.Stats.CountWithCustomTags(c.Name, 1, []string{indexTag}) + return e.executeTopN(ctx, index, c, slices, opt) default: - e.Index.Stats.CountWithCustomTags(c.Name, 1, []string{dbTag}) - return e.executeBitmapCall(ctx, db, c, slices, opt) + e.Holder.Stats.CountWithCustomTags(c.Name, 1, []string{indexTag}) + return e.executeBitmapCall(ctx, index, c, slices, opt) } } @@ -136,10 +136,10 @@ func (e *Executor) validateCallArgs(c *pql.Call) error { } // executeBitmapCall executes a call that returns a bitmap. -func (e *Executor) executeBitmapCall(ctx context.Context, db string, c *pql.Call, slices []uint64, opt *ExecOptions) (*Bitmap, error) { +func (e *Executor) executeBitmapCall(ctx context.Context, index string, c *pql.Call, slices []uint64, opt *ExecOptions) (*Bitmap, error) { // Execute calls in bulk on each remote node and merge. mapFn := func(slice uint64) (interface{}, error) { - return e.executeBitmapCallSlice(ctx, db, c, slice) + return e.executeBitmapCallSlice(ctx, index, c, slice) } // Merge returned results at coordinating node. 
@@ -152,22 +152,22 @@ func (e *Executor) executeBitmapCall(ctx context.Context, db string, c *pql.Call return other } - other, err := e.mapReduce(ctx, db, slices, c, opt, mapFn, reduceFn) + other, err := e.mapReduce(ctx, index, slices, c, opt, mapFn, reduceFn) if err != nil { return nil, err } // Attach attributes for Bitmap() calls. - // If the column label is used then return profile attributes. + // If the column label is used then return column attributes. // If the row label is used then return bitmap attributes. bm, _ := other.(*Bitmap) if c.Name == "Bitmap" { - d := e.Index.DB(db) - if d != nil { - columnLabel := d.ColumnLabel() + idx := e.Holder.Index(index) + if idx != nil { + columnLabel := idx.ColumnLabel() if columnID, ok, err := c.UintArg(columnLabel); ok && err == nil { - attrs, err := d.ProfileAttrStore().Attrs(columnID) + attrs, err := idx.ColumnAttrStore().Attrs(columnID) if err != nil { return nil, err } @@ -176,13 +176,13 @@ func (e *Executor) executeBitmapCall(ctx context.Context, db string, c *pql.Call return nil, err } else { frame, _ := c.Args["frame"].(string) - if fr := d.Frame(frame); fr != nil { + if fr := idx.Frame(frame); fr != nil { rowLabel := fr.RowLabel() rowID, _, err := c.UintArg(rowLabel) if err != nil { return nil, err } - attrs, err := fr.BitmapAttrStore().Attrs(rowID) + attrs, err := fr.RowAttrStore().Attrs(rowID) if err != nil { return nil, err } @@ -196,18 +196,18 @@ func (e *Executor) executeBitmapCall(ctx context.Context, db string, c *pql.Call } // executeBitmapCallSlice executes a bitmap call for a single slice. 
-func (e *Executor) executeBitmapCallSlice(ctx context.Context, db string, c *pql.Call, slice uint64) (*Bitmap, error) { +func (e *Executor) executeBitmapCallSlice(ctx context.Context, index string, c *pql.Call, slice uint64) (*Bitmap, error) { switch c.Name { case "Bitmap": - return e.executeBitmapSlice(ctx, db, c, slice) + return e.executeBitmapSlice(ctx, index, c, slice) case "Difference": - return e.executeDifferenceSlice(ctx, db, c, slice) + return e.executeDifferenceSlice(ctx, index, c, slice) case "Intersect": - return e.executeIntersectSlice(ctx, db, c, slice) + return e.executeIntersectSlice(ctx, index, c, slice) case "Range": - return e.executeRangeSlice(ctx, db, c, slice) + return e.executeRangeSlice(ctx, index, c, slice) case "Union": - return e.executeUnionSlice(ctx, db, c, slice) + return e.executeUnionSlice(ctx, index, c, slice) default: return nil, fmt.Errorf("unknown call: %s", c.Name) } @@ -216,8 +216,8 @@ func (e *Executor) executeBitmapCallSlice(ctx context.Context, db string, c *pql // executeTopN executes a TopN() call. // This first performs the TopN() to determine the top results and then // requeries to retrieve the full counts for each of the top results. -func (e *Executor) executeTopN(ctx context.Context, db string, c *pql.Call, slices []uint64, opt *ExecOptions) ([]Pair, error) { - bitmapIDs, _, err := c.UintSliceArg("ids") +func (e *Executor) executeTopN(ctx context.Context, index string, c *pql.Call, slices []uint64, opt *ExecOptions) ([]Pair, error) { + rowIDs, _, err := c.UintSliceArg("ids") if err != nil { return nil, fmt.Errorf("executeTopN: %v", err) } @@ -227,14 +227,14 @@ func (e *Executor) executeTopN(ctx context.Context, db string, c *pql.Call, slic } // Execute original query. 
- pairs, err := e.executeTopNSlices(ctx, db, c, slices, opt) + pairs, err := e.executeTopNSlices(ctx, index, c, slices, opt) if err != nil { return nil, err } // If this call is against specific ids, or we didn't get results, // or we are part of a larger distributed query then don't refetch. - if len(pairs) == 0 || len(bitmapIDs) > 0 || opt.Remote { + if len(pairs) == 0 || len(rowIDs) > 0 || opt.Remote { return pairs, nil } // Only the original caller should refetch the full counts. @@ -244,7 +244,7 @@ func (e *Executor) executeTopN(ctx context.Context, db string, c *pql.Call, slic sort.Sort(uint64Slice(ids)) other.Args["ids"] = ids - trimmedList, err := e.executeTopNSlices(ctx, db, other, slices, opt) + trimmedList, err := e.executeTopNSlices(ctx, index, other, slices, opt) if err != nil { return nil, err } @@ -255,10 +255,10 @@ func (e *Executor) executeTopN(ctx context.Context, db string, c *pql.Call, slic return trimmedList, nil } -func (e *Executor) executeTopNSlices(ctx context.Context, db string, c *pql.Call, slices []uint64, opt *ExecOptions) ([]Pair, error) { +func (e *Executor) executeTopNSlices(ctx context.Context, index string, c *pql.Call, slices []uint64, opt *ExecOptions) ([]Pair, error) { // Execute calls in bulk on each remote node and merge. mapFn := func(slice uint64) (interface{}, error) { - return e.executeTopNSlice(ctx, db, c, slice) + return e.executeTopNSlice(ctx, index, c, slice) } // Merge returned results at coordinating node. @@ -267,7 +267,7 @@ func (e *Executor) executeTopNSlices(ctx context.Context, db string, c *pql.Call return Pairs(other).Add(v.([]Pair)) } - other, err := e.mapReduce(ctx, db, slices, c, opt, mapFn, reduceFn) + other, err := e.mapReduce(ctx, index, slices, c, opt, mapFn, reduceFn) if err != nil { return nil, err } @@ -280,14 +280,14 @@ func (e *Executor) executeTopNSlices(ctx context.Context, db string, c *pql.Call } // executeTopNSlice executes a TopN call for a single slice. 
-func (e *Executor) executeTopNSlice(ctx context.Context, db string, c *pql.Call, slice uint64) ([]Pair, error) { +func (e *Executor) executeTopNSlice(ctx context.Context, index string, c *pql.Call, slice uint64) ([]Pair, error) { frame, _ := c.Args["frame"].(string) n, _, err := c.UintArg("n") if err != nil { return nil, fmt.Errorf("executeTopNSlice: %v", err) } field, _ := c.Args["field"].(string) - bitmapIDs, _, err := c.UintSliceArg("ids") + rowIDs, _, err := c.UintSliceArg("ids") if err != nil { return nil, fmt.Errorf("executeTopNSlice: %v", err) } @@ -304,7 +304,7 @@ func (e *Executor) executeTopNSlice(ctx context.Context, db string, c *pql.Call, // Retrieve bitmap used to intersect. var src *Bitmap if len(c.Children) == 1 { - bm, err := e.executeBitmapCallSlice(ctx, db, c.Children[0], slice) + bm, err := e.executeBitmapCallSlice(ctx, index, c.Children[0], slice) if err != nil { return nil, err } @@ -318,7 +318,7 @@ func (e *Executor) executeTopNSlice(ctx context.Context, db string, c *pql.Call, frame = DefaultFrame } - f := e.Index.Fragment(db, frame, ViewStandard, slice) + f := e.Holder.Fragment(index, frame, ViewStandard, slice) if f == nil { return nil, nil } @@ -333,7 +333,7 @@ func (e *Executor) executeTopNSlice(ctx context.Context, db string, c *pql.Call, return f.Top(TopOptions{ N: int(n), Src: src, - BitmapIDs: bitmapIDs, + RowIDs: rowIDs, FilterField: field, FilterValues: filters, MinThreshold: minThreshold, @@ -342,13 +342,13 @@ func (e *Executor) executeTopNSlice(ctx context.Context, db string, c *pql.Call, } // executeDifferenceSlice executes a difference() call for a local slice. 
-func (e *Executor) executeDifferenceSlice(ctx context.Context, db string, c *pql.Call, slice uint64) (*Bitmap, error) { +func (e *Executor) executeDifferenceSlice(ctx context.Context, index string, c *pql.Call, slice uint64) (*Bitmap, error) { var other *Bitmap if len(c.Children) == 0 { return nil, fmt.Errorf("empty Difference query is currently not supported") } for i, input := range c.Children { - bm, err := e.executeBitmapCallSlice(ctx, db, input, slice) + bm, err := e.executeBitmapCallSlice(ctx, index, input, slice) if err != nil { return nil, err } @@ -363,20 +363,20 @@ func (e *Executor) executeDifferenceSlice(ctx context.Context, db string, c *pql return other, nil } -func (e *Executor) executeBitmapSlice(ctx context.Context, db string, c *pql.Call, slice uint64) (*Bitmap, error) { - // Fetch column label from database. - d := e.Index.DB(db) - if d == nil { - return nil, ErrDatabaseNotFound +func (e *Executor) executeBitmapSlice(ctx context.Context, index string, c *pql.Call, slice uint64) (*Bitmap, error) { + // Fetch column label from index. + idx := e.Holder.Index(index) + if idx == nil { + return nil, ErrIndexNotFound } - columnLabel := d.ColumnLabel() + columnLabel := idx.ColumnLabel() // Fetch frame & row label based on argument. frame, _ := c.Args["frame"].(string) if frame == "" { frame = DefaultFrame } - f := e.Index.Frame(db, frame) + f := e.Holder.Frame(index, frame) if f == nil { return nil, ErrFrameNotFound } @@ -403,21 +403,21 @@ func (e *Executor) executeBitmapSlice(ctx context.Context, db string, c *pql.Cal } } - frag := e.Index.Fragment(db, frame, view, slice) + frag := e.Holder.Fragment(index, frame, view, slice) if frag == nil { return NewBitmap(), nil } - return frag.Bitmap(id), nil + return frag.Row(id), nil } // executeIntersectSlice executes a intersect() call for a local slice. 
-func (e *Executor) executeIntersectSlice(ctx context.Context, db string, c *pql.Call, slice uint64) (*Bitmap, error) { +func (e *Executor) executeIntersectSlice(ctx context.Context, index string, c *pql.Call, slice uint64) (*Bitmap, error) { var other *Bitmap if len(c.Children) == 0 { return nil, fmt.Errorf("empty Intersect query is currently not supported") } for i, input := range c.Children { - bm, err := e.executeBitmapCallSlice(ctx, db, input, slice) + bm, err := e.executeBitmapCallSlice(ctx, index, input, slice) if err != nil { return nil, err } @@ -433,7 +433,7 @@ func (e *Executor) executeIntersectSlice(ctx context.Context, db string, c *pql. } // executeRangeSlice executes a range() call for a local slice. -func (e *Executor) executeRangeSlice(ctx context.Context, db string, c *pql.Call, slice uint64) (*Bitmap, error) { +func (e *Executor) executeRangeSlice(ctx context.Context, index string, c *pql.Call, slice uint64) (*Bitmap, error) { // Parse frame, use default if unset. frame, _ := c.Args["frame"].(string) if frame == "" { @@ -441,7 +441,7 @@ func (e *Executor) executeRangeSlice(ctx context.Context, db string, c *pql.Call } // Retrieve base frame. - f := e.Index.Frame(db, frame) + f := e.Holder.Frame(index, frame) if f == nil { return nil, ErrFrameNotFound } @@ -482,21 +482,21 @@ func (e *Executor) executeRangeSlice(ctx context.Context, db string, c *pql.Call // Union bitmaps across all time-based subframes. bm := &Bitmap{} for _, view := range ViewsByTimeRange(ViewStandard, startTime, endTime, q) { - f := e.Index.Fragment(db, frame, view, slice) + f := e.Holder.Fragment(index, frame, view, slice) if f == nil { continue } - bm = bm.Union(f.Bitmap(rowID)) + bm = bm.Union(f.Row(rowID)) } f.Stats.Count("range", 1) return bm, nil } // executeUnionSlice executes a union() call for a local slice. 
-func (e *Executor) executeUnionSlice(ctx context.Context, db string, c *pql.Call, slice uint64) (*Bitmap, error) { +func (e *Executor) executeUnionSlice(ctx context.Context, index string, c *pql.Call, slice uint64) (*Bitmap, error) { other := NewBitmap() for i, input := range c.Children { - bm, err := e.executeBitmapCallSlice(ctx, db, input, slice) + bm, err := e.executeBitmapCallSlice(ctx, index, input, slice) if err != nil { return nil, err } @@ -512,7 +512,7 @@ func (e *Executor) executeUnionSlice(ctx context.Context, db string, c *pql.Call } // executeCount executes a count() call. -func (e *Executor) executeCount(ctx context.Context, db string, c *pql.Call, slices []uint64, opt *ExecOptions) (uint64, error) { +func (e *Executor) executeCount(ctx context.Context, index string, c *pql.Call, slices []uint64, opt *ExecOptions) (uint64, error) { if len(c.Children) == 0 { return 0, errors.New("Count() requires an input bitmap") } else if len(c.Children) > 1 { @@ -521,7 +521,7 @@ func (e *Executor) executeCount(ctx context.Context, db string, c *pql.Call, sli // Execute calls in bulk on each remote node and merge. mapFn := func(slice uint64) (interface{}, error) { - bm, err := e.executeBitmapCallSlice(ctx, db, c.Children[0], slice) + bm, err := e.executeBitmapCallSlice(ctx, index, c.Children[0], slice) if err != nil { return 0, err } @@ -534,7 +534,7 @@ func (e *Executor) executeCount(ctx context.Context, db string, c *pql.Call, sli return other + v.(uint64) } - result, err := e.mapReduce(ctx, db, slices, c, opt, mapFn, reduceFn) + result, err := e.mapReduce(ctx, index, slices, c, opt, mapFn, reduceFn) if err != nil { return 0, err } @@ -544,7 +544,7 @@ func (e *Executor) executeCount(ctx context.Context, db string, c *pql.Call, sli } // executeClearBit executes a ClearBit() call. 
-func (e *Executor) executeClearBit(ctx context.Context, db string, c *pql.Call, opt *ExecOptions) (bool, error) { +func (e *Executor) executeClearBit(ctx context.Context, index string, c *pql.Call, opt *ExecOptions) (bool, error) { view, _ := c.Args["view"].(string) frame, ok := c.Args["frame"].(string) if !ok { @@ -552,17 +552,17 @@ func (e *Executor) executeClearBit(ctx context.Context, db string, c *pql.Call, } // Retrieve frame. - d := e.Index.DB(db) - if d == nil { - return false, ErrDatabaseNotFound + idx := e.Holder.Index(index) + if idx == nil { + return false, ErrIndexNotFound } - f := d.Frame(frame) + f := idx.Frame(frame) if f == nil { return false, ErrFrameNotFound } // Retrieve labels. - columnLabel := d.ColumnLabel() + columnLabel := idx.ColumnLabel() rowLabel := f.RowLabel() // Read fields using labels. @@ -583,19 +583,19 @@ func (e *Executor) executeClearBit(ctx context.Context, db string, c *pql.Call, // Clear bits for each view. switch view { case ViewStandard: - return e.executeClearBitView(ctx, db, c, f, view, colID, rowID, opt) + return e.executeClearBitView(ctx, index, c, f, view, colID, rowID, opt) case ViewInverse: - return e.executeClearBitView(ctx, db, c, f, view, rowID, colID, opt) + return e.executeClearBitView(ctx, index, c, f, view, rowID, colID, opt) case "": var ret bool - if changed, err := e.executeClearBitView(ctx, db, c, f, ViewStandard, colID, rowID, opt); err != nil { + if changed, err := e.executeClearBitView(ctx, index, c, f, ViewStandard, colID, rowID, opt); err != nil { return ret, err } else if changed { ret = true } if f.InverseEnabled() { - if changed, err := e.executeClearBitView(ctx, db, c, f, ViewInverse, rowID, colID, opt); err != nil { + if changed, err := e.executeClearBitView(ctx, index, c, f, ViewInverse, rowID, colID, opt); err != nil { return ret, err } else if changed { ret = true @@ -608,10 +608,10 @@ func (e *Executor) executeClearBit(ctx context.Context, db string, c *pql.Call, } // executeClearBitView 
executes a ClearBit() call for a single view. -func (e *Executor) executeClearBitView(ctx context.Context, db string, c *pql.Call, f *Frame, view string, colID, rowID uint64, opt *ExecOptions) (bool, error) { +func (e *Executor) executeClearBitView(ctx context.Context, index string, c *pql.Call, f *Frame, view string, colID, rowID uint64, opt *ExecOptions) (bool, error) { slice := colID / SliceWidth ret := false - for _, node := range e.Cluster.FragmentNodes(db, slice) { + for _, node := range e.Cluster.FragmentNodes(index, slice) { // Update locally if host matches. if node.Host == e.Host { val, err := f.ClearBit(view, rowID, colID, nil) @@ -628,7 +628,7 @@ func (e *Executor) executeClearBitView(ctx context.Context, db string, c *pql.Ca } // Forward call to remote node otherwise. - if res, err := e.exec(ctx, node, db, &pql.Query{Calls: []*pql.Call{c}}, nil, opt); err != nil { + if res, err := e.exec(ctx, node, index, &pql.Query{Calls: []*pql.Call{c}}, nil, opt); err != nil { return false, err } else { ret = res[0].(bool) @@ -638,7 +638,7 @@ func (e *Executor) executeClearBitView(ctx context.Context, db string, c *pql.Ca } // executeSetBit executes a SetBit() call. -func (e *Executor) executeSetBit(ctx context.Context, db string, c *pql.Call, opt *ExecOptions) (bool, error) { +func (e *Executor) executeSetBit(ctx context.Context, index string, c *pql.Call, opt *ExecOptions) (bool, error) { view, _ := c.Args["view"].(string) frame, ok := c.Args["frame"].(string) if !ok { @@ -646,17 +646,17 @@ func (e *Executor) executeSetBit(ctx context.Context, db string, c *pql.Call, op } // Retrieve frame. - d := e.Index.DB(db) - if d == nil { - return false, ErrDatabaseNotFound + idx := e.Holder.Index(index) + if idx == nil { + return false, ErrIndexNotFound } - f := d.Frame(frame) + f := idx.Frame(frame) if f == nil { return false, ErrFrameNotFound } // Retrieve labels. 
- columnLabel := d.ColumnLabel() + columnLabel := idx.ColumnLabel() rowLabel := f.RowLabel() // Read fields using labels. @@ -687,19 +687,19 @@ func (e *Executor) executeSetBit(ctx context.Context, db string, c *pql.Call, op // Set bits for each view. switch view { case ViewStandard: - return e.executeSetBitView(ctx, db, c, f, view, colID, rowID, timestamp, opt) + return e.executeSetBitView(ctx, index, c, f, view, colID, rowID, timestamp, opt) case ViewInverse: - return e.executeSetBitView(ctx, db, c, f, view, rowID, colID, timestamp, opt) + return e.executeSetBitView(ctx, index, c, f, view, rowID, colID, timestamp, opt) case "": var ret bool - if changed, err := e.executeSetBitView(ctx, db, c, f, ViewStandard, colID, rowID, timestamp, opt); err != nil { + if changed, err := e.executeSetBitView(ctx, index, c, f, ViewStandard, colID, rowID, timestamp, opt); err != nil { return ret, err } else if changed { ret = true } if f.InverseEnabled() { - if changed, err := e.executeSetBitView(ctx, db, c, f, ViewInverse, rowID, colID, timestamp, opt); err != nil { + if changed, err := e.executeSetBitView(ctx, index, c, f, ViewInverse, rowID, colID, timestamp, opt); err != nil { return ret, err } else if changed { ret = true @@ -712,11 +712,11 @@ func (e *Executor) executeSetBit(ctx context.Context, db string, c *pql.Call, op } // executeSetBitView executes a SetBit() call for a specific view. -func (e *Executor) executeSetBitView(ctx context.Context, db string, c *pql.Call, f *Frame, view string, colID, rowID uint64, timestamp *time.Time, opt *ExecOptions) (bool, error) { +func (e *Executor) executeSetBitView(ctx context.Context, index string, c *pql.Call, f *Frame, view string, colID, rowID uint64, timestamp *time.Time, opt *ExecOptions) (bool, error) { slice := colID / SliceWidth ret := false - for _, node := range e.Cluster.FragmentNodes(db, slice) { + for _, node := range e.Cluster.FragmentNodes(index, slice) { // Update locally if host matches. 
if node.Host == e.Host { val, err := f.SetBit(view, rowID, colID, timestamp) @@ -734,7 +734,7 @@ func (e *Executor) executeSetBitView(ctx context.Context, db string, c *pql.Call } // Forward call to remote node otherwise. - if res, err := e.exec(ctx, node, db, &pql.Query{Calls: []*pql.Call{c}}, nil, opt); err != nil { + if res, err := e.exec(ctx, node, index, &pql.Query{Calls: []*pql.Call{c}}, nil, opt); err != nil { return false, err } else { ret = res[0].(bool) @@ -743,15 +743,15 @@ func (e *Executor) executeSetBitView(ctx context.Context, db string, c *pql.Call return ret, nil } -// executeSetBitmapAttrs executes a SetBitmapAttrs() call. -func (e *Executor) executeSetBitmapAttrs(ctx context.Context, db string, c *pql.Call, opt *ExecOptions) error { +// executeSetRowAttrs executes a SetRowAttrs() call. +func (e *Executor) executeSetRowAttrs(ctx context.Context, index string, c *pql.Call, opt *ExecOptions) error { frameName, ok := c.Args["frame"].(string) if !ok { - return errors.New("SetBitmapAttrs() frame required") + return errors.New("SetRowAttrs() frame required") } // Retrieve frame. - frame := e.Index.Frame(db, frameName) + frame := e.Holder.Frame(index, frameName) if frame == nil { return ErrFrameNotFound } @@ -760,9 +760,9 @@ func (e *Executor) executeSetBitmapAttrs(ctx context.Context, db string, c *pql. // Parse labels. rowID, ok, err := c.UintArg(rowLabel) if err != nil { - return fmt.Errorf("reading SetBitmapAttrs() row: %v", err) + return fmt.Errorf("reading SetRowAttrs() row: %v", err) } else if !ok { - return fmt.Errorf("SetBitmapAttrs() row field '%v' required.", rowLabel) + return fmt.Errorf("SetRowAttrs() row field '%v' required.", rowLabel) } // Copy args and remove reserved fields. @@ -771,7 +771,7 @@ func (e *Executor) executeSetBitmapAttrs(ctx context.Context, db string, c *pql. delete(attrs, rowLabel) // Set attributes. 
- if err := frame.BitmapAttrStore().SetAttrs(rowID, attrs); err != nil { + if err := frame.RowAttrStore().SetAttrs(rowID, attrs); err != nil { return err } frame.Stats.Count("SetBitmapAttrs", 1) @@ -786,7 +786,7 @@ func (e *Executor) executeSetBitmapAttrs(ctx context.Context, db string, c *pql. resp := make(chan error, len(nodes)) for _, node := range nodes { go func(node *Node) { - _, err := e.exec(ctx, node, db, &pql.Query{Calls: []*pql.Call{c}}, nil, opt) + _, err := e.exec(ctx, node, index, &pql.Query{Calls: []*pql.Call{c}}, nil, opt) resp <- err }(node) } @@ -801,18 +801,18 @@ func (e *Executor) executeSetBitmapAttrs(ctx context.Context, db string, c *pql. return nil } -// executeBulkSetBitmapAttrs executes a set of SetBitmapAttrs() calls. -func (e *Executor) executeBulkSetBitmapAttrs(ctx context.Context, db string, calls []*pql.Call, opt *ExecOptions) ([]interface{}, error) { +// executeBulkSetRowAttrs executes a set of SetRowAttrs() calls. +func (e *Executor) executeBulkSetRowAttrs(ctx context.Context, index string, calls []*pql.Call, opt *ExecOptions) ([]interface{}, error) { // Collect attributes by frame/id. m := make(map[string]map[uint64]map[string]interface{}) for _, c := range calls { frame, ok := c.Args["frame"].(string) if !ok { - return nil, errors.New("SetBitmapAttrs() frame required") + return nil, errors.New("SetRowAttrs() frame required") } // Retrieve frame. 
- f := e.Index.Frame(db, frame) + f := e.Holder.Frame(index, frame) if f == nil { return nil, ErrFrameNotFound } @@ -820,9 +820,9 @@ func (e *Executor) executeBulkSetBitmapAttrs(ctx context.Context, db string, cal rowID, ok, err := c.UintArg(rowLabel) if err != nil { - return nil, fmt.Errorf("reading SetBitmapAttrs() row: %v", rowLabel) + return nil, fmt.Errorf("reading SetRowAttrs() row: %v", rowLabel) } else if !ok { - return nil, fmt.Errorf("SetBitmapAttrs row field '%v' required.", rowLabel) + return nil, fmt.Errorf("SetRowAttrs row field '%v' required.", rowLabel) } // Copy args and remove reserved fields. @@ -851,13 +851,13 @@ func (e *Executor) executeBulkSetBitmapAttrs(ctx context.Context, db string, cal // Bulk insert attributes by frame. for name, frameMap := range m { // Retrieve frame. - frame := e.Index.Frame(db, name) + frame := e.Holder.Frame(index, name) if frame == nil { return nil, ErrFrameNotFound } // Set attributes. - if err := frame.BitmapAttrStore().SetBulkAttrs(frameMap); err != nil { + if err := frame.RowAttrStore().SetBulkAttrs(frameMap); err != nil { return nil, err } frame.Stats.Count("SetBitmapAttrs", 1) @@ -873,7 +873,7 @@ func (e *Executor) executeBulkSetBitmapAttrs(ctx context.Context, db string, cal resp := make(chan error, len(nodes)) for _, node := range nodes { go func(node *Node) { - _, err := e.exec(ctx, node, db, &pql.Query{Calls: calls}, nil, opt) + _, err := e.exec(ctx, node, index, &pql.Query{Calls: calls}, nil, opt) resp <- err }(node) } @@ -889,22 +889,22 @@ func (e *Executor) executeBulkSetBitmapAttrs(ctx context.Context, db string, cal return make([]interface{}, len(calls)), nil } -// executeSetProfileAttrs executes a SetProfileAttrs() call. -func (e *Executor) executeSetProfileAttrs(ctx context.Context, db string, c *pql.Call, opt *ExecOptions) error { - // Retrieve database. - d := e.Index.DB(db) - if d == nil { - return ErrDatabaseNotFound +// executeSetColumnAttrs executes a SetColumnAttrs() call. 
+func (e *Executor) executeSetColumnAttrs(ctx context.Context, index string, c *pql.Call, opt *ExecOptions) error {
+	// Retrieve index.
+	idx := e.Holder.Index(index)
+	if idx == nil {
+		return ErrIndexNotFound
 	}
 
 	var colName string
 	id, okID, errID := c.UintArg("id")
 	if errID != nil || !okID {
 		// Retrieve columnLabel
-		columnLabel := d.columnLabel
+		columnLabel := idx.columnLabel
 		col, okCol, errCol := c.UintArg(columnLabel)
 		if errCol != nil || !okCol {
-			return fmt.Errorf("reading SetProfileAttrs() id/columnLabel errs: %v/%v found %v/%v", errID, errCol, okID, okCol)
+			return fmt.Errorf("reading SetColumnAttrs() id/columnLabel errs: %v/%v found %v/%v", errID, errCol, okID, okCol)
 		}
 		id = col
 		colName = columnLabel
@@ -917,7 +917,7 @@ func (e *Executor) executeSetProfileAttrs(ctx context.Context, db string, c *pql
 	delete(attrs, colName)
 
 	// Set attributes.
-	if err := d.ProfileAttrStore().SetAttrs(id, attrs); err != nil {
+	if err := idx.ColumnAttrStore().SetAttrs(id, attrs); err != nil {
 		return err
 	}
-	d.Stats.Count("SetProfileAttrs", 1)
+	idx.Stats.Count("SetColumnAttrs", 1)
@@ -931,7 +931,7 @@ func (e *Executor) executeSetProfileAttrs(ctx context.Context, db string, c *pql
 	resp := make(chan error, len(nodes))
 	for _, node := range nodes {
 		go func(node *Node) {
-			_, err := e.exec(ctx, node, db, &pql.Query{Calls: []*pql.Call{c}}, nil, opt)
+			_, err := e.exec(ctx, node, index, &pql.Query{Calls: []*pql.Call{c}}, nil, opt)
 			resp <- err
 		}(node)
 	}
@@ -947,7 +947,7 @@ func (e *Executor) executeSetProfileAttrs(ctx context.Context, db string, c *pql
 }
 
 // exec executes a PQL query remotely for a set of slices on a node.
-func (e *Executor) exec(ctx context.Context, node *Node, db string, q *pql.Query, slices []uint64, opt *ExecOptions) (results []interface{}, err error) {
+func (e *Executor) exec(ctx context.Context, node *Node, index string, q *pql.Query, slices []uint64, opt *ExecOptions) (results []interface{}, err error) {
 	// Encode request object.
pbreq := &internal.QueryRequest{ Query: q.String(), @@ -963,7 +963,7 @@ func (e *Executor) exec(ctx context.Context, node *Node, db string, q *pql.Query req, err := http.NewRequest("POST", (&url.URL{ Scheme: "http", Host: node.Host, - Path: fmt.Sprintf("/db/%s/query", db), + Path: fmt.Sprintf("/index/%s/query", index), }).String(), bytes.NewReader(buf)) if err != nil { return nil, err @@ -1017,8 +1017,8 @@ func (e *Executor) exec(ctx context.Context, node *Node, db string, q *pql.Query v, err = pb.Results[i].Changed, nil case "ClearBit": v, err = pb.Results[i].Changed, nil - case "SetBitmapAttrs": - case "SetProfileAttrs": + case "SetRowAttrs": + case "SetColumnAttrs": default: v, err = decodeBitmap(pb.Results[i].GetBitmap()), nil } @@ -1033,12 +1033,12 @@ func (e *Executor) exec(ctx context.Context, node *Node, db string, q *pql.Query // slicesByNode returns a mapping of nodes to slices. // Returns errSliceUnavailable if a slice cannot be allocated to a node. -func (e *Executor) slicesByNode(nodes []*Node, db string, slices []uint64) (map[*Node][]uint64, error) { +func (e *Executor) slicesByNode(nodes []*Node, index string, slices []uint64) (map[*Node][]uint64, error) { m := make(map[*Node][]uint64) loop: for _, slice := range slices { - for _, node := range e.Cluster.FragmentNodes(db, slice) { + for _, node := range e.Cluster.FragmentNodes(index, slice) { if Nodes(nodes).Contains(node) { m[node] = append(m[node], slice) continue loop @@ -1053,7 +1053,7 @@ loop: // // If a mapping of slices to a node fails then the slices are resplit across // secondary nodes and retried. This continues to occur until all nodes are exhausted. 
-func (e *Executor) mapReduce(ctx context.Context, db string, slices []uint64, c *pql.Call, opt *ExecOptions, mapFn mapFunc, reduceFn reduceFunc) (interface{}, error) { +func (e *Executor) mapReduce(ctx context.Context, index string, slices []uint64, c *pql.Call, opt *ExecOptions, mapFn mapFunc, reduceFn reduceFunc) (interface{}, error) { ch := make(chan mapResponse, 0) // Wrap context with a cancel to kill goroutines on exit. @@ -1072,7 +1072,7 @@ func (e *Executor) mapReduce(ctx context.Context, db string, slices []uint64, c } // Start mapping across all primary owners. - if err := e.mapper(ctx, ch, nodes, db, slices, c, opt, mapFn, reduceFn); err != nil { + if err := e.mapper(ctx, ch, nodes, index, slices, c, opt, mapFn, reduceFn); err != nil { return nil, err } @@ -1091,7 +1091,7 @@ func (e *Executor) mapReduce(ctx context.Context, db string, slices []uint64, c nodes = Nodes(nodes).Filter(resp.node) // Begin mapper against secondary nodes. - if err := e.mapper(ctx, ch, nodes, db, resp.slices, c, opt, mapFn, reduceFn); err == errSliceUnavailable { + if err := e.mapper(ctx, ch, nodes, index, resp.slices, c, opt, mapFn, reduceFn); err == errSliceUnavailable { return nil, resp.err } else if err != nil { return nil, err @@ -1111,9 +1111,9 @@ func (e *Executor) mapReduce(ctx context.Context, db string, slices []uint64, c } } -func (e *Executor) mapper(ctx context.Context, ch chan mapResponse, nodes []*Node, db string, slices []uint64, c *pql.Call, opt *ExecOptions, mapFn mapFunc, reduceFn reduceFunc) error { +func (e *Executor) mapper(ctx context.Context, ch chan mapResponse, nodes []*Node, index string, slices []uint64, c *pql.Call, opt *ExecOptions, mapFn mapFunc, reduceFn reduceFunc) error { // Group slices together by nodes. 
- m, err := e.slicesByNode(nodes, db, slices) + m, err := e.slicesByNode(nodes, index, slices) if err != nil { return err } @@ -1128,7 +1128,7 @@ func (e *Executor) mapper(ctx context.Context, ch chan mapResponse, nodes []*Nod resp.result, resp.err = e.mapperLocal(ctx, nodeSlices, mapFn, reduceFn) } else if !opt.Remote { - results, err := e.exec(ctx, n, db, &pql.Query{Calls: []*pql.Call{c}}, nodeSlices, opt) + results, err := e.exec(ctx, n, index, &pql.Query{Calls: []*pql.Call{c}}, nodeSlices, opt) if len(results) > 0 { resp.result = results[0] } @@ -1213,14 +1213,14 @@ func decodeError(s string) error { return errors.New(s) } -// hasOnlySetBitmapAttrs returns true if calls only contains SetBitmapAttrs() calls. -func hasOnlySetBitmapAttrs(calls []*pql.Call) bool { +// hasOnlySetRowAttrs returns true if calls only contains SetRowAttrs() calls. +func hasOnlySetRowAttrs(calls []*pql.Call) bool { if len(calls) == 0 { return false } for _, call := range calls { - if call.Name != "SetBitmapAttrs" { + if call.Name != "SetRowAttrs" { return false } } @@ -1233,7 +1233,7 @@ func needsSlices(calls []*pql.Call) bool { } for _, call := range calls { switch call.Name { - case "ClearBit", "SetBit", "SetBitmapAttrs", "SetProfileAttrs": + case "ClearBit", "SetBit", "SetRowAttrs", "SetColumnAttrs": continue case "Count", "TopN": return true diff --git a/executor_test.go b/executor_test.go index 7a0c95622..8fed950b4 100644 --- a/executor_test.go +++ b/executor_test.go @@ -15,29 +15,29 @@ import ( // Ensure a bitmap query can be executed. 
func TestExecutor_Execute_Bitmap(t *testing.T) { t.Run("Row", func(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() - db := idx.MustCreateDBIfNotExists("d", pilosa.DBOptions{}) - f, err := db.CreateFrame("f", pilosa.FrameOptions{InverseEnabled: true}) + hldr := MustOpenHolder() + defer hldr.Close() + index := hldr.MustCreateIndexIfNotExists("i", pilosa.IndexOptions{}) + f, err := index.CreateFrame("f", pilosa.FrameOptions{InverseEnabled: true}) if err != nil { t.Fatal(err) } - e := NewExecutor(idx.Index, NewCluster(1)) + e := NewExecutor(hldr.Holder, NewCluster(1)) // Set bits. - if _, err := e.Execute(context.Background(), "d", MustParse(``+ - fmt.Sprintf("SetBit(frame=f, id=%d, profileID=%d)\n", 10, 3)+ - fmt.Sprintf("SetBit(frame=f, id=%d, profileID=%d)\n", 10, SliceWidth+1)+ - fmt.Sprintf("SetBit(frame=f, id=%d, profileID=%d)\n", 20, SliceWidth+1), + if _, err := e.Execute(context.Background(), "i", MustParse(``+ + fmt.Sprintf("SetBit(frame=f, id=%d, columnID=%d)\n", 10, 3)+ + fmt.Sprintf("SetBit(frame=f, id=%d, columnID=%d)\n", 10, SliceWidth+1)+ + fmt.Sprintf("SetBit(frame=f, id=%d, columnID=%d)\n", 20, SliceWidth+1), ), nil, nil); err != nil { t.Fatal(err) } - if err := f.BitmapAttrStore().SetAttrs(10, map[string]interface{}{"foo": "bar", "baz": uint64(123)}); err != nil { + if err := f.RowAttrStore().SetAttrs(10, map[string]interface{}{"foo": "bar", "baz": uint64(123)}); err != nil { t.Fatal(err) } - if res, err := e.Execute(context.Background(), "d", MustParse(`Bitmap(id=10, frame=f)`), nil, nil); err != nil { + if res, err := e.Execute(context.Background(), "i", MustParse(`Bitmap(id=10, frame=f)`), nil, nil); err != nil { t.Fatal(err) } else if bits := res[0].(*pilosa.Bitmap).Bits(); !reflect.DeepEqual(bits, []uint64{3, SliceWidth + 1}) { t.Fatalf("unexpected bits: %+v", bits) @@ -47,28 +47,28 @@ func TestExecutor_Execute_Bitmap(t *testing.T) { }) t.Run("Column", func(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() - db := 
idx.MustCreateDBIfNotExists("d", pilosa.DBOptions{}) - if _, err := db.CreateFrame("f", pilosa.FrameOptions{InverseEnabled: true}); err != nil { + hldr := MustOpenHolder() + defer hldr.Close() + index := hldr.MustCreateIndexIfNotExists("i", pilosa.IndexOptions{}) + if _, err := index.CreateFrame("f", pilosa.FrameOptions{InverseEnabled: true}); err != nil { t.Fatal(err) } - e := NewExecutor(idx.Index, NewCluster(1)) + e := NewExecutor(hldr.Holder, NewCluster(1)) // Set bits. - if _, err := e.Execute(context.Background(), "d", MustParse(``+ - fmt.Sprintf("SetBit(frame=f, id=%d, profileID=%d)\n", 10, 3)+ - fmt.Sprintf("SetBit(frame=f, id=%d, profileID=%d)\n", 10, SliceWidth+1)+ - fmt.Sprintf("SetBit(frame=f, id=%d, profileID=%d)\n", 20, SliceWidth+1), + if _, err := e.Execute(context.Background(), "i", MustParse(``+ + fmt.Sprintf("SetBit(frame=f, id=%d, columnID=%d)\n", 10, 3)+ + fmt.Sprintf("SetBit(frame=f, id=%d, columnID=%d)\n", 10, SliceWidth+1)+ + fmt.Sprintf("SetBit(frame=f, id=%d, columnID=%d)\n", 20, SliceWidth+1), ), nil, nil); err != nil { t.Fatal(err) } - if err := db.ProfileAttrStore().SetAttrs(SliceWidth+1, map[string]interface{}{"foo": "bar", "baz": uint64(123)}); err != nil { + if err := index.ColumnAttrStore().SetAttrs(SliceWidth+1, map[string]interface{}{"foo": "bar", "baz": uint64(123)}); err != nil { t.Fatal(err) } - if res, err := e.Execute(context.Background(), "d", MustParse(fmt.Sprintf(`Bitmap(profileID=%d, frame=f)`, SliceWidth+1)), nil, nil); err != nil { + if res, err := e.Execute(context.Background(), "i", MustParse(fmt.Sprintf(`Bitmap(columnID=%d, frame=f)`, SliceWidth+1)), nil, nil); err != nil { t.Fatal(err) } else if bits := res[0].(*pilosa.Bitmap).Bits(); !reflect.DeepEqual(bits, []uint64{10, 20}) { t.Fatalf("unexpected bits: %+v", bits) @@ -80,16 +80,16 @@ func TestExecutor_Execute_Bitmap(t *testing.T) { // Ensure a difference query can be executed. 
func TestExecutor_Execute_Difference(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() - idx.MustCreateFragmentIfNotExists("d", "general", pilosa.ViewStandard, 0).MustSetBits(10, 1) - idx.MustCreateFragmentIfNotExists("d", "general", pilosa.ViewStandard, 0).MustSetBits(10, 2) - idx.MustCreateFragmentIfNotExists("d", "general", pilosa.ViewStandard, 0).MustSetBits(10, 3) - idx.MustCreateFragmentIfNotExists("d", "general", pilosa.ViewStandard, 0).MustSetBits(11, 2) - idx.MustCreateFragmentIfNotExists("d", "general", pilosa.ViewStandard, 0).MustSetBits(11, 4) + hldr := MustOpenHolder() + defer hldr.Close() + hldr.MustCreateFragmentIfNotExists("i", "general", pilosa.ViewStandard, 0).MustSetBits(10, 1) + hldr.MustCreateFragmentIfNotExists("i", "general", pilosa.ViewStandard, 0).MustSetBits(10, 2) + hldr.MustCreateFragmentIfNotExists("i", "general", pilosa.ViewStandard, 0).MustSetBits(10, 3) + hldr.MustCreateFragmentIfNotExists("i", "general", pilosa.ViewStandard, 0).MustSetBits(11, 2) + hldr.MustCreateFragmentIfNotExists("i", "general", pilosa.ViewStandard, 0).MustSetBits(11, 4) - e := NewExecutor(idx.Index, NewCluster(1)) - if res, err := e.Execute(context.Background(), "d", MustParse(`Difference(Bitmap(id=10), Bitmap(id=11))`), nil, nil); err != nil { + e := NewExecutor(hldr.Holder, NewCluster(1)) + if res, err := e.Execute(context.Background(), "i", MustParse(`Difference(Bitmap(id=10), Bitmap(id=11))`), nil, nil); err != nil { t.Fatal(err) } else if bits := res[0].(*pilosa.Bitmap).Bits(); !reflect.DeepEqual(bits, []uint64{1, 3}) { t.Fatalf("unexpected bits: %+v", bits) @@ -98,30 +98,30 @@ func TestExecutor_Execute_Difference(t *testing.T) { // Ensure an empty difference query behaves properly. 
func TestExecutor_Execute_Empty_Difference(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() - idx.MustCreateFragmentIfNotExists("d", "general", pilosa.ViewStandard, 0).MustSetBits(10, 1) + hldr := MustOpenHolder() + defer hldr.Close() + hldr.MustCreateFragmentIfNotExists("i", "general", pilosa.ViewStandard, 0).MustSetBits(10, 1) - e := NewExecutor(idx.Index, NewCluster(1)) - if res, err := e.Execute(context.Background(), "d", MustParse(`Difference()`), nil, nil); err == nil { + e := NewExecutor(hldr.Holder, NewCluster(1)) + if res, err := e.Execute(context.Background(), "i", MustParse(`Difference()`), nil, nil); err == nil { t.Fatalf("Empty Difference query should give error, but got %v", res) } } // Ensure an intersect query can be executed. func TestExecutor_Execute_Intersect(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() - idx.MustCreateFragmentIfNotExists("d", "general", pilosa.ViewStandard, 0).MustSetBits(10, 1) - idx.MustCreateFragmentIfNotExists("d", "general", pilosa.ViewStandard, 1).MustSetBits(10, SliceWidth+1) - idx.MustCreateFragmentIfNotExists("d", "general", pilosa.ViewStandard, 1).MustSetBits(10, SliceWidth+2) + hldr := MustOpenHolder() + defer hldr.Close() + hldr.MustCreateFragmentIfNotExists("i", "general", pilosa.ViewStandard, 0).MustSetBits(10, 1) + hldr.MustCreateFragmentIfNotExists("i", "general", pilosa.ViewStandard, 1).MustSetBits(10, SliceWidth+1) + hldr.MustCreateFragmentIfNotExists("i", "general", pilosa.ViewStandard, 1).MustSetBits(10, SliceWidth+2) - idx.MustCreateFragmentIfNotExists("d", "general", pilosa.ViewStandard, 0).MustSetBits(11, 1) - idx.MustCreateFragmentIfNotExists("d", "general", pilosa.ViewStandard, 0).MustSetBits(11, 2) - idx.MustCreateFragmentIfNotExists("d", "general", pilosa.ViewStandard, 1).MustSetBits(11, SliceWidth+2) + hldr.MustCreateFragmentIfNotExists("i", "general", pilosa.ViewStandard, 0).MustSetBits(11, 1) + hldr.MustCreateFragmentIfNotExists("i", "general", pilosa.ViewStandard, 
0).MustSetBits(11, 2) + hldr.MustCreateFragmentIfNotExists("i", "general", pilosa.ViewStandard, 1).MustSetBits(11, SliceWidth+2) - e := NewExecutor(idx.Index, NewCluster(1)) - if res, err := e.Execute(context.Background(), "d", MustParse(`Intersect(Bitmap(id=10), Bitmap(id=11))`), nil, nil); err != nil { + e := NewExecutor(hldr.Holder, NewCluster(1)) + if res, err := e.Execute(context.Background(), "i", MustParse(`Intersect(Bitmap(id=10), Bitmap(id=11))`), nil, nil); err != nil { t.Fatal(err) } else if bits := res[0].(*pilosa.Bitmap).Bits(); !reflect.DeepEqual(bits, []uint64{1, SliceWidth + 2}) { t.Fatalf("unexpected bits: %+v", bits) @@ -130,28 +130,28 @@ func TestExecutor_Execute_Intersect(t *testing.T) { // Ensure an empty intersect query behaves properly. func TestExecutor_Execute_Empty_Intersect(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() + hldr := MustOpenHolder() + defer hldr.Close() - e := NewExecutor(idx.Index, NewCluster(1)) - if res, err := e.Execute(context.Background(), "d", MustParse(`Intersect()`), nil, nil); err == nil { + e := NewExecutor(hldr.Holder, NewCluster(1)) + if res, err := e.Execute(context.Background(), "i", MustParse(`Intersect()`), nil, nil); err == nil { t.Fatalf("Empty Intersect query should give error, but got %v", res) } } // Ensure a union query can be executed. 
func TestExecutor_Execute_Union(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() - idx.MustCreateFragmentIfNotExists("d", "general", pilosa.ViewStandard, 0).MustSetBits(10, 0) - idx.MustCreateFragmentIfNotExists("d", "general", pilosa.ViewStandard, 1).MustSetBits(10, SliceWidth+1) - idx.MustCreateFragmentIfNotExists("d", "general", pilosa.ViewStandard, 1).MustSetBits(10, SliceWidth+2) + hldr := MustOpenHolder() + defer hldr.Close() + hldr.MustCreateFragmentIfNotExists("i", "general", pilosa.ViewStandard, 0).MustSetBits(10, 0) + hldr.MustCreateFragmentIfNotExists("i", "general", pilosa.ViewStandard, 1).MustSetBits(10, SliceWidth+1) + hldr.MustCreateFragmentIfNotExists("i", "general", pilosa.ViewStandard, 1).MustSetBits(10, SliceWidth+2) - idx.MustCreateFragmentIfNotExists("d", "general", pilosa.ViewStandard, 0).MustSetBits(11, 2) - idx.MustCreateFragmentIfNotExists("d", "general", pilosa.ViewStandard, 1).MustSetBits(11, SliceWidth+2) + hldr.MustCreateFragmentIfNotExists("i", "general", pilosa.ViewStandard, 0).MustSetBits(11, 2) + hldr.MustCreateFragmentIfNotExists("i", "general", pilosa.ViewStandard, 1).MustSetBits(11, SliceWidth+2) - e := NewExecutor(idx.Index, NewCluster(1)) - if res, err := e.Execute(context.Background(), "d", MustParse(`Union(Bitmap(id=10), Bitmap(id=11))`), nil, nil); err != nil { + e := NewExecutor(hldr.Holder, NewCluster(1)) + if res, err := e.Execute(context.Background(), "i", MustParse(`Union(Bitmap(id=10), Bitmap(id=11))`), nil, nil); err != nil { t.Fatal(err) } else if bits := res[0].(*pilosa.Bitmap).Bits(); !reflect.DeepEqual(bits, []uint64{0, 2, SliceWidth + 1, SliceWidth + 2}) { t.Fatalf("unexpected bits: %+v", bits) @@ -160,12 +160,12 @@ func TestExecutor_Execute_Union(t *testing.T) { // Ensure an empty union query behaves properly. 
func TestExecutor_Execute_Empty_Union(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() - idx.MustCreateFragmentIfNotExists("d", "general", pilosa.ViewStandard, 0).MustSetBits(10, 0) + hldr := MustOpenHolder() + defer hldr.Close() + hldr.MustCreateFragmentIfNotExists("i", "general", pilosa.ViewStandard, 0).MustSetBits(10, 0) - e := NewExecutor(idx.Index, NewCluster(1)) - if res, err := e.Execute(context.Background(), "d", MustParse(`Union()`), nil, nil); err != nil { + e := NewExecutor(hldr.Holder, NewCluster(1)) + if res, err := e.Execute(context.Background(), "i", MustParse(`Union()`), nil, nil); err != nil { t.Fatal(err) } else if bits := res[0].(*pilosa.Bitmap).Bits(); !reflect.DeepEqual(bits, []uint64{}) { t.Fatalf("unexpected bits: %+v", bits) @@ -174,14 +174,14 @@ func TestExecutor_Execute_Empty_Union(t *testing.T) { // Ensure a count query can be executed. func TestExecutor_Execute_Count(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0).MustSetBits(10, 3) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).MustSetBits(10, SliceWidth+1) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).MustSetBits(10, SliceWidth+2) + hldr := MustOpenHolder() + defer hldr.Close() + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).MustSetBits(10, 3) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).MustSetBits(10, SliceWidth+1) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).MustSetBits(10, SliceWidth+2) - e := NewExecutor(idx.Index, NewCluster(1)) - if res, err := e.Execute(context.Background(), "d", MustParse(`Count(Bitmap(id=10, frame=f))`), nil, nil); err != nil { + e := NewExecutor(hldr.Holder, NewCluster(1)) + if res, err := e.Execute(context.Background(), "i", MustParse(`Count(Bitmap(id=10, frame=f))`), nil, nil); err != nil { t.Fatal(err) } else if res[0] != uint64(3) { 
t.Fatalf("unexpected n: %d", res[0]) @@ -190,16 +190,16 @@ func TestExecutor_Execute_Count(t *testing.T) { // Ensure a set query can be executed. func TestExecutor_Execute_SetBit(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() + hldr := MustOpenHolder() + defer hldr.Close() - e := NewExecutor(idx.Index, NewCluster(1)) - f := idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0) - if n := f.Bitmap(11).Count(); n != 0 { + e := NewExecutor(hldr.Holder, NewCluster(1)) + f := hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0) + if n := f.Row(11).Count(); n != 0 { t.Fatalf("unexpected bitmap count: %d", n) } - if res, err := e.Execute(context.Background(), "d", MustParse(`SetBit(id=11, frame=f, profileID=1)`), nil, nil); err != nil { + if res, err := e.Execute(context.Background(), "i", MustParse(`SetBit(id=11, frame=f, columnID=1)`), nil, nil); err != nil { t.Fatal(err) } else { if !res[0].(bool) { @@ -207,10 +207,10 @@ func TestExecutor_Execute_SetBit(t *testing.T) { } } - if n := f.Bitmap(11).Count(); n != 1 { + if n := f.Row(11).Count(); n != 1 { t.Fatalf("unexpected bitmap count: %d", n) } - if res, err := e.Execute(context.Background(), "d", MustParse(`SetBit(id=11, frame=f, profileID=1)`), nil, nil); err != nil { + if res, err := e.Execute(context.Background(), "i", MustParse(`SetBit(id=11, frame=f, columnID=1)`), nil, nil); err != nil { t.Fatal(err) } else { if res[0].(bool) { @@ -219,37 +219,37 @@ func TestExecutor_Execute_SetBit(t *testing.T) { } } -// Ensure a SetBitmapAttrs() query can be executed. -func TestExecutor_Execute_SetBitmapAttrs(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() +// Ensure a SetRowAttrs() query can be executed. +func TestExecutor_Execute_SetRowAttrs(t *testing.T) { + hldr := MustOpenHolder() + defer hldr.Close() // Create frames. 
- db := idx.MustCreateDBIfNotExists("d", pilosa.DBOptions{}) - if _, err := db.CreateFrameIfNotExists("f", pilosa.FrameOptions{}); err != nil { + index := hldr.MustCreateIndexIfNotExists("i", pilosa.IndexOptions{}) + if _, err := index.CreateFrameIfNotExists("f", pilosa.FrameOptions{}); err != nil { t.Fatal(err) - } else if _, err := db.CreateFrameIfNotExists("xxx", pilosa.FrameOptions{}); err != nil { + } else if _, err := index.CreateFrameIfNotExists("xxx", pilosa.FrameOptions{}); err != nil { t.Fatal(err) } // Set two fields on f/10. // Also set fields on other bitmaps and frames to test isolation. - e := NewExecutor(idx.Index, NewCluster(1)) - if _, err := e.Execute(context.Background(), "d", MustParse(`SetBitmapAttrs(id=10, frame=f, foo="bar")`), nil, nil); err != nil { + e := NewExecutor(hldr.Holder, NewCluster(1)) + if _, err := e.Execute(context.Background(), "i", MustParse(`SetRowAttrs(id=10, frame=f, foo="bar")`), nil, nil); err != nil { t.Fatal(err) } - if _, err := e.Execute(context.Background(), "d", MustParse(`SetBitmapAttrs(id=200, frame=f, YYY=1)`), nil, nil); err != nil { + if _, err := e.Execute(context.Background(), "i", MustParse(`SetRowAttrs(id=200, frame=f, YYY=1)`), nil, nil); err != nil { t.Fatal(err) } - if _, err := e.Execute(context.Background(), "d", MustParse(`SetBitmapAttrs(id=10, frame=xxx, YYY=1)`), nil, nil); err != nil { + if _, err := e.Execute(context.Background(), "i", MustParse(`SetRowAttrs(id=10, frame=xxx, YYY=1)`), nil, nil); err != nil { t.Fatal(err) } - if _, err := e.Execute(context.Background(), "d", MustParse(`SetBitmapAttrs(id=10, frame=f, baz=123, bat=true)`), nil, nil); err != nil { + if _, err := e.Execute(context.Background(), "i", MustParse(`SetRowAttrs(id=10, frame=f, baz=123, bat=true)`), nil, nil); err != nil { t.Fatal(err) } - f := idx.Frame("d", "f") - if m, err := f.BitmapAttrStore().Attrs(10); err != nil { + f := hldr.Frame("i", "f") + if m, err := f.RowAttrStore().Attrs(10); err != nil { t.Fatal(err) } 
else if !reflect.DeepEqual(m, map[string]interface{}{"foo": "bar", "baz": int64(123), "bat": true}) { t.Fatalf("unexpected bitmap attr: %#v", m) @@ -258,23 +258,23 @@ func TestExecutor_Execute_SetBitmapAttrs(t *testing.T) { // Ensure a TopN() query can be executed. func TestExecutor_Execute_TopN(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() - - // Set bits for bitmaps 0, 10, & 20 across two slices. - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0).SetBit(0, 0) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0).SetBit(0, 1) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).SetBit(0, SliceWidth) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).SetBit(0, SliceWidth+2) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 5).SetBit(0, (5*SliceWidth)+100) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0).SetBit(10, 0) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).SetBit(10, SliceWidth) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).SetBit(20, SliceWidth) - idx.MustCreateFragmentIfNotExists("d", "other", pilosa.ViewStandard, 0).SetBit(0, 0) + hldr := MustOpenHolder() + defer hldr.Close() + + // Set bits for rows 0, 10, & 20 across two slices. 
+ hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).SetBit(0, 0) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).SetBit(0, 1) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).SetBit(0, SliceWidth) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).SetBit(0, SliceWidth+2) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 5).SetBit(0, (5*SliceWidth)+100) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).SetBit(10, 0) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).SetBit(10, SliceWidth) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).SetBit(20, SliceWidth) + hldr.MustCreateFragmentIfNotExists("i", "other", pilosa.ViewStandard, 0).SetBit(0, 0) // Execute query. - e := NewExecutor(idx.Index, NewCluster(1)) - if result, err := e.Execute(context.Background(), "d", MustParse(`TopN(frame=f, n=2)`), nil, nil); err != nil { + e := NewExecutor(hldr.Holder, NewCluster(1)) + if result, err := e.Execute(context.Background(), "i", MustParse(`TopN(frame=f, n=2)`), nil, nil); err != nil { t.Fatal(err) } else if !reflect.DeepEqual(result[0], []pilosa.Pair{ {ID: 0, Count: 5}, @@ -284,20 +284,20 @@ func TestExecutor_Execute_TopN(t *testing.T) { } } func TestExecutor_Execute_TopN_fill(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() + hldr := MustOpenHolder() + defer hldr.Close() - // Set bits for bitmaps 0, 10, & 20 across two slices. 
- idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0).SetBit(0, 0) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0).SetBit(0, 1) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0).SetBit(0, 2) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).SetBit(0, SliceWidth) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).SetBit(1, SliceWidth+2) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).SetBit(1, SliceWidth) + // Set bits for rows 0, 10, & 20 across two slices. + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).SetBit(0, 0) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).SetBit(0, 1) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).SetBit(0, 2) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).SetBit(0, SliceWidth) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).SetBit(1, SliceWidth+2) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).SetBit(1, SliceWidth) // Execute query. 
- e := NewExecutor(idx.Index, NewCluster(1)) - if result, err := e.Execute(context.Background(), "d", MustParse(`TopN(frame=f, n=1)`), nil, nil); err != nil { + e := NewExecutor(hldr.Holder, NewCluster(1)) + if result, err := e.Execute(context.Background(), "i", MustParse(`TopN(frame=f, n=1)`), nil, nil); err != nil { t.Fatal(err) } else if !reflect.DeepEqual(result, []interface{}{[]pilosa.Pair{ {ID: 0, Count: 4}, @@ -308,30 +308,30 @@ func TestExecutor_Execute_TopN_fill(t *testing.T) { // Ensure func TestExecutor_Execute_TopN_fill_small(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() + hldr := MustOpenHolder() + defer hldr.Close() - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0).SetBit(0, 0) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).SetBit(0, SliceWidth) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 2).SetBit(0, 2*SliceWidth) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 3).SetBit(0, 3*SliceWidth) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 4).SetBit(0, 4*SliceWidth) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).SetBit(0, 0) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).SetBit(0, SliceWidth) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 2).SetBit(0, 2*SliceWidth) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 3).SetBit(0, 3*SliceWidth) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 4).SetBit(0, 4*SliceWidth) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0).SetBit(1, 0) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0).SetBit(1, 1) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).SetBit(1, 0) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).SetBit(1, 1) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).SetBit(2, SliceWidth) - 
idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).SetBit(2, SliceWidth+1) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).SetBit(2, SliceWidth) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).SetBit(2, SliceWidth+1) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 2).SetBit(3, 2*SliceWidth) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 2).SetBit(3, 2*SliceWidth+1) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 2).SetBit(3, 2*SliceWidth) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 2).SetBit(3, 2*SliceWidth+1) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 3).SetBit(4, 3*SliceWidth) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 3).SetBit(4, 3*SliceWidth+1) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 3).SetBit(4, 3*SliceWidth) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 3).SetBit(4, 3*SliceWidth+1) // Execute query. - e := NewExecutor(idx.Index, NewCluster(1)) - if result, err := e.Execute(context.Background(), "d", MustParse(`TopN(frame=f, n=1)`), nil, nil); err != nil { + e := NewExecutor(hldr.Holder, NewCluster(1)) + if result, err := e.Execute(context.Background(), "i", MustParse(`TopN(frame=f, n=1)`), nil, nil); err != nil { t.Fatal(err) } else if !reflect.DeepEqual(result, []interface{}{[]pilosa.Pair{ {ID: 0, Count: 5}, @@ -342,27 +342,27 @@ func TestExecutor_Execute_TopN_fill_small(t *testing.T) { // Ensure a TopN() query with a source bitmap can be executed. func TestExecutor_Execute_TopN_Src(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() - - // Set bits for bitmaps 0, 10, & 20 across two slices. 
- idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0).SetBit(0, 0) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0).SetBit(0, 1) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).SetBit(0, SliceWidth) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).SetBit(10, SliceWidth) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).SetBit(10, SliceWidth+1) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).SetBit(20, SliceWidth) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).SetBit(20, SliceWidth+1) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).SetBit(20, SliceWidth+2) - - // Create an intersecting bitmap. - idx.MustCreateFragmentIfNotExists("d", "other", pilosa.ViewStandard, 1).SetBit(100, SliceWidth) - idx.MustCreateFragmentIfNotExists("d", "other", pilosa.ViewStandard, 1).SetBit(100, SliceWidth+1) - idx.MustCreateFragmentIfNotExists("d", "other", pilosa.ViewStandard, 1).SetBit(100, SliceWidth+2) + hldr := MustOpenHolder() + defer hldr.Close() + + // Set bits for rows 0, 10, & 20 across two slices. + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).SetBit(0, 0) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).SetBit(0, 1) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).SetBit(0, SliceWidth) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).SetBit(10, SliceWidth) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).SetBit(10, SliceWidth+1) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).SetBit(20, SliceWidth) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).SetBit(20, SliceWidth+1) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).SetBit(20, SliceWidth+2) + + // Create an intersecting row. 
+ hldr.MustCreateFragmentIfNotExists("i", "other", pilosa.ViewStandard, 1).SetBit(100, SliceWidth) + hldr.MustCreateFragmentIfNotExists("i", "other", pilosa.ViewStandard, 1).SetBit(100, SliceWidth+1) + hldr.MustCreateFragmentIfNotExists("i", "other", pilosa.ViewStandard, 1).SetBit(100, SliceWidth+2) // Execute query. - e := NewExecutor(idx.Index, NewCluster(1)) - if result, err := e.Execute(context.Background(), "d", MustParse(`TopN(Bitmap(id=100, frame=other), frame=f, n=3)`), nil, nil); err != nil { + e := NewExecutor(hldr.Holder, NewCluster(1)) + if result, err := e.Execute(context.Background(), "i", MustParse(`TopN(Bitmap(id=100, frame=other), frame=f, n=3)`), nil, nil); err != nil { t.Fatal(err) } else if !reflect.DeepEqual(result, []interface{}{[]pilosa.Pair{ {ID: 20, Count: 3}, @@ -376,17 +376,17 @@ func TestExecutor_Execute_TopN_Src(t *testing.T) { //Ensure TopN handles Attribute filters func TestExecutor_Execute_TopN_Attr(t *testing.T) { // - idx := MustOpenIndex() - defer idx.Close() - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0).SetBit(0, 0) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0).SetBit(0, 1) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).SetBit(10, SliceWidth) + hldr := MustOpenHolder() + defer hldr.Close() + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).SetBit(0, 0) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).SetBit(0, 1) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).SetBit(10, SliceWidth) - if err := idx.Frame("d", "f").BitmapAttrStore().SetAttrs(10, map[string]interface{}{"category": int64(123)}); err != nil { + if err := hldr.Frame("i", "f").RowAttrStore().SetAttrs(10, map[string]interface{}{"category": int64(123)}); err != nil { t.Fatal(err) } - e := NewExecutor(idx.Index, NewCluster(1)) - if result, err := e.Execute(context.Background(), "d", MustParse(`TopN(frame="f", n=1, field="category", 
filters=[123])`), nil, nil); err != nil { + e := NewExecutor(hldr.Holder, NewCluster(1)) + if result, err := e.Execute(context.Background(), "i", MustParse(`TopN(frame="f", n=1, field="category", filters=[123])`), nil, nil); err != nil { t.Fatal(err) } else if !reflect.DeepEqual(result, []interface{}{[]pilosa.Pair{ {ID: 10, Count: 1}, @@ -399,17 +399,17 @@ func TestExecutor_Execute_TopN_Attr(t *testing.T) { //Ensure TopN handles Attribute filters with source bitmap func TestExecutor_Execute_TopN_Attr_Src(t *testing.T) { // - idx := MustOpenIndex() - defer idx.Close() - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0).SetBit(0, 0) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0).SetBit(0, 1) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).SetBit(10, SliceWidth) + hldr := MustOpenHolder() + defer hldr.Close() + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).SetBit(0, 0) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).SetBit(0, 1) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).SetBit(10, SliceWidth) - if err := idx.Frame("d", "f").BitmapAttrStore().SetAttrs(10, map[string]interface{}{"category": uint64(123)}); err != nil { + if err := hldr.Frame("i", "f").RowAttrStore().SetAttrs(10, map[string]interface{}{"category": uint64(123)}); err != nil { t.Fatal(err) } - e := NewExecutor(idx.Index, NewCluster(1)) - if result, err := e.Execute(context.Background(), "d", MustParse(`TopN(Bitmap(id=10,frame=f),frame="f", n=1, field="category", filters=[123])`), nil, nil); err != nil { + e := NewExecutor(hldr.Holder, NewCluster(1)) + if result, err := e.Execute(context.Background(), "i", MustParse(`TopN(Bitmap(id=10,frame=f),frame="f", n=1, field="category", filters=[123])`), nil, nil); err != nil { t.Fatal(err) } else if !reflect.DeepEqual(result, []interface{}{[]pilosa.Pair{ {ID: 10, Count: 1}, @@ -421,14 +421,14 @@ func 
TestExecutor_Execute_TopN_Attr_Src(t *testing.T) { // Ensure a range query can be executed. func TestExecutor_Execute_Range(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() + hldr := MustOpenHolder() + defer hldr.Close() - // Create database. - db := idx.MustCreateDBIfNotExists("d", pilosa.DBOptions{}) + // Create index. + index := hldr.MustCreateIndexIfNotExists("i", pilosa.IndexOptions{}) // Create frame. - f, err := db.CreateFrameIfNotExists("f", pilosa.FrameOptions{}) + f, err := index.CreateFrameIfNotExists("f", pilosa.FrameOptions{}) if err != nil { t.Fatal(err) } else if err := f.SetTimeQuantum(pilosa.TimeQuantum("YMDH")); err != nil { @@ -445,10 +445,10 @@ func TestExecutor_Execute_Range(t *testing.T) { f.MustSetBit(pilosa.ViewStandard, 1, 2, MustParseTimePtr("1999-12-30 00:00")) // too early f.MustSetBit(pilosa.ViewStandard, 1, 2, MustParseTimePtr("2002-02-01 00:00")) // too late - f.MustSetBit(pilosa.ViewStandard, 10, 2, MustParseTimePtr("2001-01-01 00:00")) // different bitmap + f.MustSetBit(pilosa.ViewStandard, 10, 2, MustParseTimePtr("2001-01-01 00:00")) // different row - e := NewExecutor(idx.Index, NewCluster(1)) - if res, err := e.Execute(context.Background(), "d", MustParse(`Range(id=1, frame=f, start="1999-12-31T00:00", end="2002-01-01T03:00")`), nil, nil); err != nil { + e := NewExecutor(hldr.Holder, NewCluster(1)) + if res, err := e.Execute(context.Background(), "i", MustParse(`Range(id=1, frame=f, start="1999-12-31T00:00", end="2002-01-01T03:00")`), nil, nil); err != nil { t.Fatal(err) } else if bits := res[0].(*pilosa.Bitmap).Bits(); !reflect.DeepEqual(bits, []uint64{2, 3, 4, 5, 6, 7}) { t.Fatalf("unexpected bits: %+v", bits) @@ -465,14 +465,12 @@ func TestExecutor_Execute_Remote_Bitmap(t *testing.T) { c.Nodes[1].Host = s.Host() // Mock secondary server's executor to verify arguments and return a bitmap. 
- s.Handler.Executor.ExecuteFn = func(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { - if db != `d` { - t.Fatalf("unexpected db: %s", db) + s.Handler.Executor.ExecuteFn = func(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { + if index != "i" { + t.Fatalf("unexpected index: %s", index) } else if query.String() != `Bitmap(frame="f", id=10)` { t.Fatalf("unexpected query: %s", query.String()) - // NOTE: while the following is technically incorrect (it should be {0, 2}) because the calling node doesn't know about slice 2 yet, - // we are ok with this and assuming that the calling node will become aware of slice 2 via inter-node messaging - } else if !reflect.DeepEqual(slices, []uint64{0}) { + } else if !reflect.DeepEqual(slices, []uint64{1}) { t.Fatalf("unexpected slices: %+v", slices) } @@ -487,14 +485,14 @@ func TestExecutor_Execute_Remote_Bitmap(t *testing.T) { // Create local executor data. // The local node owns slice 1. 
- idx := MustOpenIndex() - defer idx.Close() - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).MustSetBits(10, (1*SliceWidth)+1) + hldr := MustOpenHolder() + defer hldr.Close() + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 1).MustSetBits(10, (1*SliceWidth)+1) - e := NewExecutor(idx.Index, c) - if res, err := e.Execute(context.Background(), "d", MustParse(`Bitmap(id=10, frame=f)`), nil, nil); err != nil { + e := NewExecutor(hldr.Holder, c) + if res, err := e.Execute(context.Background(), "i", MustParse(`Bitmap(id=10, frame=f)`), nil, nil); err != nil { t.Fatal(err) - } else if bits := res[0].(*pilosa.Bitmap).Bits(); !reflect.DeepEqual(bits, []uint64{1, 2, (1 * SliceWidth) + 1, 2*SliceWidth + 4}) { + } else if bits := res[0].(*pilosa.Bitmap).Bits(); !reflect.DeepEqual(bits, []uint64{1, 2, 2*SliceWidth + 4}) { t.Fatalf("unexpected bits: %+v", bits) } } @@ -509,18 +507,18 @@ func TestExecutor_Execute_Remote_Count(t *testing.T) { c.Nodes[1].Host = s.Host() // Mock secondary server's executor to return a count. - s.Handler.Executor.ExecuteFn = func(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { + s.Handler.Executor.ExecuteFn = func(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { return []interface{}{uint64(10)}, nil } // Create local executor data. The local node owns slice 1. 
- idx := MustOpenIndex() - defer idx.Close() - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).MustSetBits(10, (1*SliceWidth)+1) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).MustSetBits(10, (1*SliceWidth)+2) + hldr := MustOpenHolder() + defer hldr.Close() + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 2).MustSetBits(10, (2*SliceWidth)+1) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 2).MustSetBits(10, (2*SliceWidth)+2) - e := NewExecutor(idx.Index, c) - if res, err := e.Execute(context.Background(), "d", MustParse(`Count(Bitmap(id=10, frame=f))`), nil, nil); err != nil { + e := NewExecutor(hldr.Holder, c) + if res, err := e.Execute(context.Background(), "i", MustParse(`Count(Bitmap(id=10, frame=f))`), nil, nil); err != nil { t.Fatal(err) } else if res[0] != uint64(12) { t.Fatalf("unexpected n: %d", res[0]) @@ -539,10 +537,10 @@ func TestExecutor_Execute_Remote_SetBit(t *testing.T) { // Mock secondary server's executor to verify arguments. var remoteCalled bool - s.Handler.Executor.ExecuteFn = func(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { - if db != `d` { - t.Fatalf("unexpected db: %s", db) - } else if query.String() != `SetBit(frame="f", id=10, profileID=2)` { + s.Handler.Executor.ExecuteFn = func(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { + if index != `i` { + t.Fatalf("unexpected index: %s", index) + } else if query.String() != `SetBit(columnID=2, frame="f", id=10)` { t.Fatalf("unexpected query: %s", query.String()) } remoteCalled = true @@ -550,21 +548,21 @@ func TestExecutor_Execute_Remote_SetBit(t *testing.T) { } // Create local executor data. - idx := MustOpenIndex() - defer idx.Close() + hldr := MustOpenHolder() + defer hldr.Close() // Create frame. 
- if _, err := idx.MustCreateDBIfNotExists("d", pilosa.DBOptions{}).CreateFrame("f", pilosa.FrameOptions{}); err != nil { + if _, err := hldr.MustCreateIndexIfNotExists("i", pilosa.IndexOptions{}).CreateFrame("f", pilosa.FrameOptions{}); err != nil { t.Fatal(err) } - e := NewExecutor(idx.Index, c) - if _, err := e.Execute(context.Background(), "d", MustParse(`SetBit(id=10, frame=f, profileID=2)`), nil, nil); err != nil { + e := NewExecutor(hldr.Holder, c) + if _, err := e.Execute(context.Background(), "i", MustParse(`SetBit(id=10, frame=f, columnID=2)`), nil, nil); err != nil { t.Fatal(err) } - // Verify that one bit is set on both node's index. - if n := idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0).Bitmap(10).Count(); n != 1 { + // Verify that one bit is set on both node's holder. + if n := hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0).Row(10).Count(); n != 1 { t.Fatalf("unexpected local count: %d", n) } if !remoteCalled { @@ -584,10 +582,10 @@ func TestExecutor_Execute_Remote_SetBit_With_Timestamp(t *testing.T) { // Mock secondary server's executor to verify arguments. var remoteCalled bool - s.Handler.Executor.ExecuteFn = func(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { - if db != `d` { - t.Fatalf("unexpected db: %s", db) - } else if query.String() != `SetBit(frame="f", id=10, profileID=2, timestamp="2016-12-11T10:09")` { + s.Handler.Executor.ExecuteFn = func(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { + if index != `i` { + t.Fatalf("unexpected index: %s", index) + } else if query.String() != `SetBit(columnID=2, frame="f", id=10, timestamp="2016-12-11T10:09")` { t.Fatalf("unexpected query: %s", query.String()) } remoteCalled = true @@ -595,23 +593,23 @@ func TestExecutor_Execute_Remote_SetBit_With_Timestamp(t *testing.T) { } // Create local executor data. 
- idx := MustOpenIndex() - defer idx.Close() + hldr := MustOpenHolder() + defer hldr.Close() // Create frame. - if f, err := idx.MustCreateDBIfNotExists("d", pilosa.DBOptions{}).CreateFrame("f", pilosa.FrameOptions{}); err != nil { + if f, err := hldr.MustCreateIndexIfNotExists("i", pilosa.IndexOptions{}).CreateFrame("f", pilosa.FrameOptions{}); err != nil { t.Fatal(err) } else if err := f.SetTimeQuantum("Y"); err != nil { t.Fatal(err) } - e := NewExecutor(idx.Index, c) - if _, err := e.Execute(context.Background(), "d", MustParse(`SetBit(id=10, frame=f, profileID=2, timestamp="2016-12-11T10:09")`), nil, nil); err != nil { + e := NewExecutor(hldr.Holder, c) + if _, err := e.Execute(context.Background(), "i", MustParse(`SetBit(id=10, frame=f, columnID=2, timestamp="2016-12-11T10:09")`), nil, nil); err != nil { t.Fatal(err) } - // Verify that one bit is set on both node's index. - if n := idx.MustCreateFragmentIfNotExists("d", "f", "standard_2016", 0).Bitmap(10).Count(); n != 1 { + // Verify that one bit is set on both node's holder. + if n := hldr.MustCreateFragmentIfNotExists("i", "f", "standard_2016", 0).Row(10).Count(); n != 1 { t.Fatalf("unexpected local count: %d", n) } if !remoteCalled { @@ -630,10 +628,10 @@ func TestExecutor_Execute_Remote_TopN(t *testing.T) { // Mock secondary server's executor to verify arguments and return a bitmap. 
var remoteExecN int - s.Handler.Executor.ExecuteFn = func(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { - if db != `d` { - t.Fatalf("unexpected db: %s", db) - } else if !reflect.DeepEqual(slices, []uint64{0, 2}) { + s.Handler.Executor.ExecuteFn = func(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { + if index != "i" { + t.Fatalf("unexpected index: %s", index) + } else if !reflect.DeepEqual(slices, []uint64{1, 3}) { t.Fatalf("unexpected slices: %+v", slices) } @@ -661,14 +659,14 @@ func TestExecutor_Execute_Remote_TopN(t *testing.T) { }}, nil } - // Create local executor data on slice 1 & 3. - idx := MustOpenIndex() - defer idx.Close() - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 1).MustSetBits(30, (1*SliceWidth)+1) - idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 3).MustSetBits(30, (3*SliceWidth)+2) + // Create local executor data on slice 2 & 4. + hldr := MustOpenHolder() + defer hldr.Close() + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 2).MustSetBits(30, (2*SliceWidth)+1) + hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 4).MustSetBits(30, (4*SliceWidth)+2) - e := NewExecutor(idx.Index, c) - if res, err := e.Execute(context.Background(), "d", MustParse(`TopN(frame=f, n=3)`), nil, nil); err != nil { + e := NewExecutor(hldr.Holder, c) + if res, err := e.Execute(context.Background(), "i", MustParse(`TopN(frame=f, n=3)`), nil, nil); err != nil { t.Fatal(err) } else if !reflect.DeepEqual(res, []interface{}{[]pilosa.Pair{ {ID: 0, Count: 5}, @@ -686,9 +684,9 @@ type Executor struct { // NewExecutor returns a new instance of Executor. // The executor always matches the hostname of the first cluster node. 
-func NewExecutor(index *pilosa.Index, cluster *pilosa.Cluster) *Executor { +func NewExecutor(holder *pilosa.Holder, cluster *pilosa.Cluster) *Executor { e := &Executor{Executor: pilosa.NewExecutor()} - e.Index = index + e.Holder = holder e.Cluster = cluster e.Host = cluster.Nodes[0].Host return e diff --git a/fragment.go b/fragment.go index b31769251..d3f7ddb8b 100644 --- a/fragment.go +++ b/fragment.go @@ -29,7 +29,7 @@ import ( ) const ( - // SliceWidth is the number of profile IDs in a slice. + // SliceWidth is the number of column IDs in a slice. SliceWidth = 1048576 // SnapshotExt is the file extension used for an in-process snapshot. @@ -41,7 +41,7 @@ const ( // CacheExt is the file extension for persisted cache ids. CacheExt = ".cache" - // HashBlockSize is the number of bitmaps in a merkle hash block. + // HashBlockSize is the number of rows in a merkle hash block. HashBlockSize = 100 ) @@ -50,12 +50,12 @@ const ( DefaultFragmentMaxOpN = 2000 ) -// Fragment represents the intersection of a frame and slice in a database. +// Fragment represents the intersection of a frame and slice in an index. type Fragment struct { mu sync.Mutex // Composite identifiers - db string + index string frame string view string slice uint64 @@ -67,13 +67,13 @@ type Fragment struct { storageData []byte opN int // number of ops since snapshot - // Cache for bitmap counts. + // Cache for row counts. cacheType string // passed in by frame cache Cache cacheSize uint32 - // Cache containing full bitmaps (not just counts). - bitmapCache BitmapCache + // Cache containing full rows (not just counts). + rowCache BitmapCache // Cached checksums for each block. checksums map[int][]byte @@ -86,18 +86,18 @@ type Fragment struct { // Writer used for out-of-band log entries. LogOutput io.Writer - // Bitmap attribute storage. + // Row attribute storage. // This is set by the parent frame unless overridden for testing. 
- BitmapAttrStore *AttrStore + RowAttrStore *AttrStore stats StatsClient } // NewFragment returns a new instance of Fragment. -func NewFragment(path, db, frame, view string, slice uint64) *Fragment { +func NewFragment(path, index, frame, view string, slice uint64) *Fragment { return &Fragment{ path: path, - db: db, + index: index, frame: frame, view: view, slice: slice, @@ -117,8 +117,8 @@ func (f *Fragment) Path() string { return f.path } // CachePath returns the path to the fragment's cache data. func (f *Fragment) CachePath() string { return f.path + CacheExt } -// DB returns the database the fragment was initialized with. -func (f *Fragment) DB() string { return f.db } +// Index returns the index that the fragment was initialized with. +func (f *Fragment) Index() string { return f.index } // Frame returns the frame the fragment was initialized with. func (f *Fragment) Frame() string { return f.frame } @@ -144,7 +144,7 @@ func (f *Fragment) Open() error { return err } - // Fill cache with bitmaps persisted to disk. + // Fill cache with rows persisted to disk. if err := f.openCache(); err != nil { return err } @@ -213,13 +213,13 @@ func (f *Fragment) openStorage() error { // Attach the file to the bitmap to act as a write-ahead log. f.storage.OpWriter = f.file - f.bitmapCache = &SimpleCache{make(map[uint64]*Bitmap)} + f.rowCache = &SimpleCache{make(map[uint64]*Bitmap)} return nil } -// openCache initializes the cache from bitmap ids persisted to disk. +// openCache initializes the cache from row ids persisted to disk. func (f *Fragment) openCache() error { // Determine cache type from frame name. switch f.cacheType { @@ -247,12 +247,12 @@ func (f *Fragment) openCache() error { return nil } - // Read in all bitmaps by ID. + // Read in all rows by ID. // This will cause them to be added to the cache. 
- for _, bitmapID := range pb.BitmapIDs { - //n := f.storage.CountRange(bitmapID*SliceWidth, (bitmapID+1)*SliceWidth) - n := f.bitmap(bitmapID, true, true).Count() - f.cache.BulkAdd(bitmapID, n) + for _, id := range pb.IDs { + //n := f.storage.CountRange(id*SliceWidth, (id+1)*SliceWidth) + n := f.row(id, true, true).Count() + f.cache.BulkAdd(id, n) } f.cache.Invalidate() @@ -314,17 +314,16 @@ func (f *Fragment) closeStorage() error { // logger returns a logger instance for the fragment.nt. func (f *Fragment) logger() *log.Logger { return log.New(f.LogOutput, "", log.LstdFlags) } -// Bitmap returns a bitmap by ID. -func (f *Fragment) Bitmap(bitmapID uint64) *Bitmap { +// Row returns a row by ID. +func (f *Fragment) Row(rowID uint64) *Bitmap { f.mu.Lock() defer f.mu.Unlock() - return f.bitmap(bitmapID, true, true) + return f.row(rowID, true, true) } -func (f *Fragment) bitmap(bitmapID uint64, checkBitmapCache bool, updateBitmapCache bool) *Bitmap { - - if checkBitmapCache { - r, ok := f.bitmapCache.Fetch(bitmapID) +func (f *Fragment) row(rowID uint64, checkRowCache bool, updateRowCache bool) *Bitmap { + if checkRowCache { + r, ok := f.rowCache.Fetch(rowID) if ok && r != nil { return r } @@ -332,11 +331,11 @@ func (f *Fragment) bitmap(bitmapID uint64, checkBitmapCache bool, updateBitmapCa // Only use a subset of the containers. // NOTE: The start & end ranges must be divisible by - data := f.storage.OffsetRange(f.slice*SliceWidth, bitmapID*SliceWidth, (bitmapID+1)*SliceWidth) + data := f.storage.OffsetRange(f.slice*SliceWidth, rowID*SliceWidth, (rowID+1)*SliceWidth) // Reference bitmap subrange in storage. // We Clone() data because otherwise bm will contains pointers to containers in storage. - // This causes unexpected results when we cache the bitmap and try to use it later. + // This causes unexpected results when we cache the row and try to use it later. 
bm := &Bitmap{ segments: []BitmapSegment{{ data: *data.Clone(), @@ -346,25 +345,25 @@ func (f *Fragment) bitmap(bitmapID uint64, checkBitmapCache bool, updateBitmapCa } bm.InvalidateCount() - if updateBitmapCache { - f.bitmapCache.Add(bitmapID, bm) + if updateRowCache { + f.rowCache.Add(rowID, bm) } return bm } -// SetBit sets a bit for a given profile & bitmap within the fragment. +// SetBit sets a bit for a given column & row within the fragment. // This updates both the on-disk storage and the in-cache bitmap. -func (f *Fragment) SetBit(bitmapID, profileID uint64) (changed bool, err error) { +func (f *Fragment) SetBit(rowID, columnID uint64) (changed bool, err error) { f.mu.Lock() defer f.mu.Unlock() - return f.setBit(bitmapID, profileID) + return f.setBit(rowID, columnID) } -func (f *Fragment) setBit(bitmapID, profileID uint64) (changed bool, err error) { +func (f *Fragment) setBit(rowID, columnID uint64) (changed bool, err error) { changed = false // Determine the position of the bit in the storage. - pos, err := f.pos(bitmapID, profileID) + pos, err := f.pos(rowID, columnID) if err != nil { return false, err } @@ -380,37 +379,37 @@ func (f *Fragment) setBit(bitmapID, profileID uint64) (changed bool, err error) } // Invalidate block checksum. - delete(f.checksums, int(bitmapID/HashBlockSize)) + delete(f.checksums, int(rowID/HashBlockSize)) // Increment number of operations until snapshot is required. if err := f.incrementOpN(); err != nil { return false, err } - // Get the bitmap from bitmapCache or fragment.storage. - bm := f.bitmap(bitmapID, true, true) - bm.SetBit(profileID) + // Get the row from row cache or fragment.storage. + bm := f.row(rowID, true, true) + bm.SetBit(columnID) // Update the cache. - f.cache.Add(bitmapID, bm.Count()) + f.cache.Add(rowID, bm.Count()) f.stats.Count("setN", 1) return changed, nil } -// ClearBit clears a bit for a given profile & bitmap within the fragment. 
+// ClearBit clears a bit for a given column & row within the fragment. // This updates both the on-disk storage and the in-cache bitmap. -func (f *Fragment) ClearBit(bitmapID, profileID uint64) (bool, error) { +func (f *Fragment) ClearBit(rowID, columnID uint64) (bool, error) { f.mu.Lock() defer f.mu.Unlock() - return f.clearBit(bitmapID, profileID) + return f.clearBit(rowID, columnID) } -func (f *Fragment) clearBit(bitmapID, profileID uint64) (changed bool, err error) { +func (f *Fragment) clearBit(rowID, columnID uint64) (changed bool, err error) { changed = false // Determine the position of the bit in the storage. - pos, err := f.pos(bitmapID, profileID) + pos, err := f.pos(rowID, columnID) if err != nil { return false, err } @@ -426,38 +425,38 @@ func (f *Fragment) clearBit(bitmapID, profileID uint64) (changed bool, err error } // Invalidate block checksum. - delete(f.checksums, int(bitmapID/HashBlockSize)) + delete(f.checksums, int(rowID/HashBlockSize)) // Increment number of operations until snapshot is required. if err := f.incrementOpN(); err != nil { return false, err } - // Get the bitmap from bitmapCache or fragment.storage. - bm := f.bitmap(bitmapID, true, true) - bm.ClearBit(profileID) + // Get the row from cache or fragment.storage. + bm := f.row(rowID, true, true) + bm.ClearBit(columnID) // Update the cache. - f.cache.Add(bitmapID, bm.Count()) + f.cache.Add(rowID, bm.Count()) f.stats.Count("clearN", 1) return changed, nil } -// pos translates the bitmap ID and profile ID into a position in the storage bitmap. -func (f *Fragment) pos(bitmapID, profileID uint64) (uint64, error) { - // Return an error if the profile ID is out of the range of the fragment's slice. - minProfileID := f.slice * SliceWidth - if profileID < minProfileID || profileID >= minProfileID+SliceWidth { - return 0, errors.New("profile out of bounds") +// pos translates the row ID and column ID into a position in the storage bitmap. 
+func (f *Fragment) pos(rowID, columnID uint64) (uint64, error) { + // Return an error if the column ID is out of the range of the fragment's slice. + minColumnID := f.slice * SliceWidth + if columnID < minColumnID || columnID >= minColumnID+SliceWidth { + return 0, errors.New("column out of bounds") } - return Pos(bitmapID, profileID), nil + return Pos(rowID, columnID), nil } // ForEachBit executes fn for every bit set in the fragment. // Errors returned from fn are passed through. -func (f *Fragment) ForEachBit(fn func(bitmapID, profileID uint64) error) error { +func (f *Fragment) ForEachBit(fn func(rowID, columnID uint64) error) error { f.mu.Lock() defer f.mu.Unlock() @@ -474,15 +473,15 @@ func (f *Fragment) ForEachBit(fn func(bitmapID, profileID uint64) error) error { return err } -// Top returns the top bitmaps from the fragment. -// If opt.Src is specified then only bitmaps which intersect src are returned. -// If opt.FilterValues exist then the bitmap attribute specified by field is matched. +// Top returns the top rows from the fragment. +// If opt.Src is specified then only rows which intersect src are returned. +// If opt.FilterValues exist then the row attribute specified by field is matched. func (f *Fragment) Top(opt TopOptions) ([]Pair, error) { - // Retrieve pairs. If no bitmap ids specified then return from cache. - pairs := f.topBitmapPairs(opt.BitmapIDs) + // Retrieve pairs. If no row ids specified then return from cache. + pairs := f.topBitmapPairs(opt.RowIDs) - // If BitmapIDs are provided, we don't want to truncate the result set - if len(opt.BitmapIDs) > 0 { + // If row ids are provided, we don't want to truncate the result set + if len(opt.RowIDs) > 0 { opt.N = 0 } @@ -509,9 +508,9 @@ func (f *Fragment) Top(opt TopOptions) ([]Pair, error) { // Iterate over rankings and add to results until we have enough. 
results := &PairHeap{} for _, pair := range pairs { - bitmapID, cnt := pair.ID, pair.Count + rowID, cnt := pair.ID, pair.Count - // Ignore empty bitmaps. + // Ignore empty rows. if cnt <= 0 { continue } @@ -531,7 +530,7 @@ func (f *Fragment) Top(opt TopOptions) ([]Pair, error) { // Apply filter, if set. if filters != nil { - attr, err := f.BitmapAttrStore.Attrs(bitmapID) + attr, err := f.RowAttrStore.Attrs(rowID) if err != nil { return nil, err } else if attr == nil { @@ -548,7 +547,7 @@ func (f *Fragment) Top(opt TopOptions) ([]Pair, error) { // Calculate count and append. count := cnt if opt.Src != nil { - count = opt.Src.IntersectionCount(f.Bitmap(bitmapID)) + count = opt.Src.IntersectionCount(f.Row(rowID)) } if count == 0 { continue @@ -566,7 +565,7 @@ func (f *Fragment) Top(opt TopOptions) ([]Pair, error) { } } - heap.Push(results, Pair{ID: bitmapID, Count: count}) + heap.Push(results, Pair{ID: rowID, Count: count}) // If we reach the requested number of pairs and we are not computing // intersections then simply exit. If we are intersecting then sort @@ -584,20 +583,20 @@ func (f *Fragment) Top(opt TopOptions) ([]Pair, error) { // If it's too low then don't try finding anymore pairs. threshold := results.Pairs[0].Count - // If the bitmap doesn't have enough bits set before the intersection - // then we can assume that any remaining bitmaps also have a count too low. + // If the row doesn't have enough bits set before the intersection + // then we can assume that any remaining rows also have a count too low. if threshold < opt.MinThreshold || cnt < threshold { break } // Calculate the intersecting bit count and skip if it's below our - // last bitmap in our current result set. - count := opt.Src.IntersectionCount(f.Bitmap(bitmapID)) + // last row in our current result set. 
+ count := opt.Src.IntersectionCount(f.Row(rowID)) if count < threshold { continue } - heap.Push(results, Pair{ID: bitmapID, Count: count}) + heap.Push(results, Pair{ID: rowID, Count: count}) } //Pop first opt.N elements out of heap @@ -611,32 +610,32 @@ func (f *Fragment) Top(opt TopOptions) ([]Pair, error) { return r, nil } -func (f *Fragment) topBitmapPairs(bitmapIDs []uint64) []BitmapPair { - // If no specific bitmaps are requested, retrieve top bitmaps. - if len(bitmapIDs) == 0 { +func (f *Fragment) topBitmapPairs(rowIDs []uint64) []BitmapPair { + // If no specific rows are requested, retrieve top rows. + if len(rowIDs) == 0 { f.mu.Lock() defer f.mu.Unlock() f.cache.Invalidate() return f.cache.Top() } - // Otherwise retrieve specific bitmaps. - pairs := make([]BitmapPair, 0, len(bitmapIDs)) - for _, bitmapID := range bitmapIDs { + // Otherwise retrieve specific rows. + pairs := make([]BitmapPair, 0, len(rowIDs)) + for _, rowID := range rowIDs { // Look up cache first, if available. - if n := f.cache.Get(bitmapID); n > 0 { + if n := f.cache.Get(rowID); n > 0 { pairs = append(pairs, BitmapPair{ - ID: bitmapID, + ID: rowID, Count: n, }) continue } - bm := f.Bitmap(bitmapID) + bm := f.Row(rowID) if bm.Count() > 0 { // Otherwise load from storage. pairs = append(pairs, BitmapPair{ - ID: bitmapID, + ID: rowID, Count: bm.Count(), }) } @@ -647,14 +646,14 @@ func (f *Fragment) topBitmapPairs(bitmapIDs []uint64) []BitmapPair { // TopOptions represents options passed into the Top() function. type TopOptions struct { - // Number of bitmaps to return. + // Number of rows to return. N int // Bitmap to intersect with. Src *Bitmap - // Specific bitmaps to filter against. - BitmapIDs []uint64 + // Specific rows to filter against. + RowIDs []uint64 MinThreshold uint64 // Filter field name & values. @@ -768,14 +767,14 @@ func (f *Fragment) readContiguousChecksums(a *[]FragmentBlock, blockID int) (n i } } -// BlockData returns bits in a block as bitmap & profile ID pairs. 
-func (f *Fragment) BlockData(id int) (bitmapIDs, profileIDs []uint64) { +// BlockData returns bits in a block as row & column ID pairs. +func (f *Fragment) BlockData(id int) (rowIDs, columnIDs []uint64) { f.mu.Lock() defer f.mu.Unlock() f.storage.ForEachRange(uint64(id)*HashBlockSize*SliceWidth, (uint64(id)+1)*HashBlockSize*SliceWidth, func(i uint64) { - bitmapIDs = append(bitmapIDs, i/SliceWidth) - profileIDs = append(profileIDs, i%SliceWidth) + rowIDs = append(rowIDs, i/SliceWidth) + columnIDs = append(columnIDs, i%SliceWidth) }) return } @@ -789,8 +788,8 @@ func (f *Fragment) BlockData(id int) (bitmapIDs, profileIDs []uint64) { func (f *Fragment) MergeBlock(id int, data []PairSet) (sets, clears []PairSet, err error) { // Ensure that all pair sets are of equal length. for i := range data { - if len(data[i].BitmapIDs) != len(data[i].ProfileIDs) { - return nil, nil, fmt.Errorf("pair set mismatch(idx=%d): %d != %d", i, len(data[i].BitmapIDs), len(data[i].ProfileIDs)) + if len(data[i].RowIDs) != len(data[i].ColumnIDs) { + return nil, nil, fmt.Errorf("pair set mismatch(idx=%d): %d != %d", i, len(data[i].RowIDs), len(data[i].ColumnIDs)) } } @@ -801,22 +800,22 @@ func (f *Fragment) MergeBlock(id int, data []PairSet) (sets, clears []PairSet, e sets = make([]PairSet, len(data)+1) clears = make([]PairSet, len(data)+1) - // Limit upper bitmap/profile pair. - maxBitmapID := uint64(id+1) * HashBlockSize - maxProfileID := uint64(SliceWidth) + // Limit upper row/column pair. + maxRowID := uint64(id+1) * HashBlockSize + maxColumnID := uint64(SliceWidth) // Create buffered iterator for local block. itrs := make([]*BufIterator, 1, len(data)+1) itrs[0] = NewBufIterator( NewLimitIterator( - NewRoaringIterator(f.storage.Iterator()), maxBitmapID, maxProfileID, + NewRoaringIterator(f.storage.Iterator()), maxRowID, maxColumnID, ), ) // Append buffered iterators for each incoming block. 
for i := range data { - var itr Iterator = NewSliceIterator(data[i].BitmapIDs, data[i].ProfileIDs) - itr = NewLimitIterator(itr, maxBitmapID, maxProfileID) + var itr Iterator = NewSliceIterator(data[i].RowIDs, data[i].ColumnIDs) + itr = NewLimitIterator(itr, maxRowID, maxColumnID) itrs = append(itrs, NewBufIterator(itr)) } @@ -833,8 +832,8 @@ func (f *Fragment) MergeBlock(id int, data []PairSet) (sets, clears []PairSet, e values := make([]bool, len(itrs)) for { var min struct { - bitmapID uint64 - profileID uint64 + rowID uint64 + columnID uint64 } // Find the lowest pair. @@ -844,9 +843,9 @@ func (f *Fragment) MergeBlock(id int, data []PairSet) (sets, clears []PairSet, e if eof { // no more data continue } else if !hasData { // first pair - min.bitmapID, min.profileID, hasData = bid, pid, true - } else if bid < min.bitmapID || (bid == min.bitmapID && pid < min.profileID) { // lower pair - min.bitmapID, min.profileID = bid, pid + min.rowID, min.columnID, hasData = bid, pid, true + } else if bid < min.rowID || (bid == min.rowID && pid < min.columnID) { // lower pair + min.rowID, min.columnID = bid, pid } } @@ -860,7 +859,7 @@ func (f *Fragment) MergeBlock(id int, data []PairSet) (sets, clears []PairSet, e for i, itr := range itrs { bid, pid, eof := itr.Next() - values[i] = !eof && bid == min.bitmapID && pid == min.profileID + values[i] = !eof && bid == min.rowID && pid == min.columnID if values[i] { setN++ // set } else { @@ -880,25 +879,25 @@ func (f *Fragment) MergeBlock(id int, data []PairSet) (sets, clears []PairSet, e // Append to either the set or clear diff. 
if newValue {
-				sets[i].BitmapIDs = append(sets[i].BitmapIDs, min.bitmapID)
-				sets[i].ProfileIDs = append(sets[i].ProfileIDs, min.profileID)
+				sets[i].RowIDs = append(sets[i].RowIDs, min.rowID)
+				sets[i].ColumnIDs = append(sets[i].ColumnIDs, min.columnID)
			} else {
-				clears[i].BitmapIDs = append(sets[i].BitmapIDs, min.bitmapID)
-				clears[i].ProfileIDs = append(sets[i].ProfileIDs, min.profileID)
+				clears[i].RowIDs = append(clears[i].RowIDs, min.rowID)
+				clears[i].ColumnIDs = append(clears[i].ColumnIDs, min.columnID)
			}
		}
	}

	// Set local bits.
-	for i := range sets[0].ProfileIDs {
-		if _, err := f.setBit(sets[0].BitmapIDs[i], (f.Slice()*SliceWidth)+sets[0].ProfileIDs[i]); err != nil {
+	for i := range sets[0].ColumnIDs {
+		if _, err := f.setBit(sets[0].RowIDs[i], (f.Slice()*SliceWidth)+sets[0].ColumnIDs[i]); err != nil {
			return nil, nil, err
		}
	}

	// Clear local bits.
-	for i := range clears[0].ProfileIDs {
-		if _, err := f.clearBit(clears[0].BitmapIDs[i], (f.Slice()*SliceWidth)+clears[0].ProfileIDs[i]); err != nil {
+	for i := range clears[0].ColumnIDs {
+		if _, err := f.clearBit(clears[0].RowIDs[i], (f.Slice()*SliceWidth)+clears[0].ColumnIDs[i]); err != nil {
			return nil, nil, err
		}
	}
@@ -908,12 +907,12 @@ func (f *Fragment) MergeBlock(id int, data []PairSet) (sets, clears []PairSet, e

// Import bulk imports a set of bits and then snapshots the storage.
// This does not affect the fragment's cache.
-func (f *Fragment) Import(bitmapIDs, profileIDs []uint64) error {
+func (f *Fragment) Import(rowIDs, columnIDs []uint64) error {
	f.mu.Lock()
	defer f.mu.Unlock()

-	// Verify that there are an equal number of bitmap ids and profile ids.
-	if len(bitmapIDs) != len(profileIDs) {
-		return fmt.Errorf("mismatch of bitmap/profile len: %d != %d", len(bitmapIDs), len(profileIDs))
+	// Verify that there are an equal number of row ids and column ids.
+ if len(rowIDs) != len(columnIDs) { + return fmt.Errorf("mismatch of row/column len: %d != %d", len(rowIDs), len(columnIDs)) } // Disconnect op writer so we don't append updates. @@ -924,11 +923,11 @@ func (f *Fragment) Import(bitmapIDs, profileIDs []uint64) error { lastID := uint64(0) if err := func() error { set := make(map[uint64]struct{}) - for i := range bitmapIDs { - bitmapID, profileID := bitmapIDs[i], profileIDs[i] + for i := range rowIDs { + rowID, columnID := rowIDs[i], columnIDs[i] // Determine the position of the bit in the storage. - pos, err := f.pos(bitmapID, profileID) + pos, err := f.pos(rowID, columnID) if err != nil { return err } @@ -942,21 +941,21 @@ func (f *Fragment) Import(bitmapIDs, profileIDs []uint64) error { // import optimization to avoid linear foreach calls // slight risk of concurrent cache counter being off but // no real danger - if i == 0 || bitmapID != lastID { - lastID = bitmapID - set[bitmapID] = struct{}{} + if i == 0 || rowID != lastID { + lastID = rowID + set[rowID] = struct{}{} } // Invalidate block checksum. - delete(f.checksums, int(bitmapID/HashBlockSize)) + delete(f.checksums, int(rowID/HashBlockSize)) } - // Update cache counts for all bitmaps. - for bitmapID := range set { - // Import should ALWAYS have bitmap() load a new bm from fragment.storage - // because the bitmap that's in bitmapCache hasn't been updated with + // Update cache counts for all rows. + for rowID := range set { + // Import should ALWAYS have row() load a new bm from fragment.storage + // because the row that's in rowCache hasn't been updated with // this import's data. 
- f.cache.BulkAdd(bitmapID, f.bitmap(bitmapID, false, false).Count()) + f.cache.BulkAdd(rowID, f.row(rowID, false, false).Count()) } f.cache.Invalidate() @@ -1003,8 +1002,8 @@ func track(start time.Time, message string, stats StatsClient, logger *log.Logge func (f *Fragment) snapshot() error { logger := f.logger() - logger.Printf("fragment: snapshotting %s/%s/%s/%d", f.db, f.frame, f.view, f.slice) - completeMessage := fmt.Sprintf("fragment: snapshot complete %s/%s/%s/%d", f.db, f.frame, f.view, f.slice) + logger.Printf("fragment: snapshotting %s/%s/%s/%d", f.index, f.frame, f.view, f.slice) + completeMessage := fmt.Sprintf("fragment: snapshot complete %s/%s/%s/%d", f.index, f.frame, f.view, f.slice) start := time.Now() defer track(start, completeMessage, f.stats, logger) @@ -1064,13 +1063,11 @@ func (f *Fragment) flushCache() error { return nil } - // Retrieve a list of bitmap ids from the cache. - bitmapIDs := f.cache.BitmapIDs() + // Retrieve a list of row ids from the cache. + ids := f.cache.IDs() // Marshal cache data to bytes. - buf, err := proto.Marshal(&internal.Cache{ - BitmapIDs: bitmapIDs, - }) + buf, err := proto.Marshal(&internal.Cache{IDs: ids}) if err != nil { return err } @@ -1256,7 +1253,7 @@ func (f *Fragment) readCacheFromArchive(r io.Reader) error { return nil } -// FragmentBlock represents info about a subsection of the bitmaps in a block. +// FragmentBlock represents info about a subsection of the rows in a block. // This is used for comparing data in remote blocks for active anti-entropy. type FragmentBlock struct { ID int `json:"id"` @@ -1312,7 +1309,7 @@ func (s *FragmentSyncer) isClosing() bool { // then merges any blocks which have differences. func (s *FragmentSyncer) SyncFragment() error { // Determine replica set. 
- nodes := s.Cluster.FragmentNodes(s.Fragment.DB(), s.Fragment.Slice()) + nodes := s.Cluster.FragmentNodes(s.Fragment.Index(), s.Fragment.Slice()) if len(nodes) == 1 { return nil } @@ -1332,7 +1329,7 @@ func (s *FragmentSyncer) SyncFragment() error { if err != nil { return err } - blocks, err := client.FragmentBlocks(context.Background(), s.Fragment.DB(), s.Fragment.Frame(), s.Fragment.View(), s.Fragment.Slice()) + blocks, err := client.FragmentBlocks(context.Background(), s.Fragment.Index(), s.Fragment.Frame(), s.Fragment.View(), s.Fragment.Slice()) if err != nil && err != ErrFragmentNotFound { return err } @@ -1389,7 +1386,7 @@ func (s *FragmentSyncer) SyncFragment() error { return nil } -// syncBlock sends and receives all bitmaps for a given block. +// syncBlock sends and receives all rows for a given block. // Returns an error if any remote hosts are unreachable. func (s *FragmentSyncer) syncBlock(id int) error { f := s.Fragment @@ -1397,7 +1394,7 @@ func (s *FragmentSyncer) syncBlock(id int) error { // Read pairs from each remote block. var pairSets []PairSet var clients []*Client - for _, node := range s.Cluster.FragmentNodes(f.DB(), f.Slice()) { + for _, node := range s.Cluster.FragmentNodes(f.Index(), f.Slice()) { if s.Host == node.Host { continue } @@ -1414,14 +1411,14 @@ func (s *FragmentSyncer) syncBlock(id int) error { clients = append(clients, client) // Only sync the standard block. - bitmapIDs, profileIDs, err := client.BlockData(context.Background(), f.DB(), f.Frame(), ViewStandard, f.Slice(), id) + rowIDs, columnIDs, err := client.BlockData(context.Background(), f.Index(), f.Frame(), ViewStandard, f.Slice(), id) if err != nil { return err } pairSets = append(pairSets, PairSet{ - ProfileIDs: profileIDs, - BitmapIDs: bitmapIDs, + ColumnIDs: columnIDs, + RowIDs: rowIDs, }) } @@ -1441,7 +1438,7 @@ func (s *FragmentSyncer) syncBlock(id int) error { set, clear := sets[i], clears[i] // Ignore if there are no differences. 
- if len(set.ProfileIDs) == 0 && len(clear.ProfileIDs) == 0 { + if len(set.ColumnIDs) == 0 && len(clear.ColumnIDs) == 0 { continue } @@ -1449,11 +1446,11 @@ func (s *FragmentSyncer) syncBlock(id int) error { var buf bytes.Buffer // Only sync the standard block. - for j := 0; j < len(set.ProfileIDs); j++ { - fmt.Fprintf(&buf, "SetBit(frame=%q, id=%d, profileID=%d)\n", f.Frame(), set.BitmapIDs[j], (f.Slice()*SliceWidth)+set.ProfileIDs[j]) + for j := 0; j < len(set.ColumnIDs); j++ { + fmt.Fprintf(&buf, "SetBit(frame=%q, id=%d, columnID=%d)\n", f.Frame(), set.RowIDs[j], (f.Slice()*SliceWidth)+set.ColumnIDs[j]) } - for j := 0; j < len(clear.ProfileIDs); j++ { - fmt.Fprintf(&buf, "ClearBit(frame=%q, id=%d, profileID=%d)\n", f.Frame(), clear.BitmapIDs[j], (f.Slice()*SliceWidth)+clear.ProfileIDs[j]) + for j := 0; j < len(clear.ColumnIDs); j++ { + fmt.Fprintf(&buf, "ClearBit(frame=%q, id=%d, columnID=%d)\n", f.Frame(), clear.RowIDs[j], (f.Slice()*SliceWidth)+clear.ColumnIDs[j]) } // Verify sync is not prematurely closing. @@ -1462,7 +1459,7 @@ func (s *FragmentSyncer) syncBlock(id int) error { } // Execute query. - _, err := clients[i].ExecuteQuery(context.Background(), f.DB(), buf.String(), false) + _, err := clients[i].ExecuteQuery(context.Background(), f.Index(), buf.String(), false) if err != nil { return err } @@ -1479,10 +1476,10 @@ func madvise(b []byte, advice int) (err error) { return } -// PairSet is a list of equal length bitmap and profile id lists. +// PairSet is a list of equal length row and column id lists. type PairSet struct { - BitmapIDs []uint64 - ProfileIDs []uint64 + RowIDs []uint64 + ColumnIDs []uint64 } // byteSlicesEqual returns true if all slices are equal. @@ -1499,7 +1496,7 @@ func byteSlicesEqual(a [][]byte) bool { return true } -// Pos returns the bitmap position of a bitmap/profile pair. 
-func Pos(bitmapID, profileID uint64) uint64 { - return (bitmapID * SliceWidth) + (profileID % SliceWidth) +// Pos returns the row position of a row/column pair. +func Pos(rowID, columnID uint64) uint64 { + return (rowID * SliceWidth) + (columnID % SliceWidth) } diff --git a/fragment_test.go b/fragment_test.go index f54f5415b..f4d3fc247 100644 --- a/fragment_test.go +++ b/fragment_test.go @@ -23,7 +23,7 @@ const SliceWidth = pilosa.SliceWidth // Ensure a fragment can set a bit and retrieve it. func TestFragment_SetBit(t *testing.T) { - f := MustOpenFragment("d", "f", pilosa.ViewStandard, 0) + f := MustOpenFragment("i", "f", pilosa.ViewStandard, 0) defer f.Close() // Set bits on the fragment. @@ -35,26 +35,26 @@ func TestFragment_SetBit(t *testing.T) { t.Fatal(err) } - // Verify counts on bitmaps. - if n := f.Bitmap(120).Count(); n != 2 { + // Verify counts on rows. + if n := f.Row(120).Count(); n != 2 { t.Fatalf("unexpected count: %d", n) - } else if n := f.Bitmap(121).Count(); n != 1 { + } else if n := f.Row(121).Count(); n != 1 { t.Fatalf("unexpected count: %d", n) } // Close and reopen the fragment & verify the data. if err := f.Reopen(); err != nil { t.Fatal(err) - } else if n := f.Bitmap(120).Count(); n != 2 { + } else if n := f.Row(120).Count(); n != 2 { t.Fatalf("unexpected count (reopen): %d", n) - } else if n := f.Bitmap(121).Count(); n != 1 { + } else if n := f.Row(121).Count(); n != 1 { t.Fatalf("unexpected count (reopen): %d", n) } } // Ensure a fragment can clear a set bit. func TestFragment_ClearBit(t *testing.T) { - f := MustOpenFragment("d", "f", pilosa.ViewStandard, 0) + f := MustOpenFragment("i", "f", pilosa.ViewStandard, 0) defer f.Close() // Set and then clear bits on the fragment. @@ -66,22 +66,22 @@ func TestFragment_ClearBit(t *testing.T) { t.Fatal(err) } - // Verify count on bitmap. - if n := f.Bitmap(1000).Count(); n != 1 { + // Verify count on row. 
+ if n := f.Row(1000).Count(); n != 1 { t.Fatalf("unexpected count: %d", n) } // Close and reopen the fragment & verify the data. if err := f.Reopen(); err != nil { t.Fatal(err) - } else if n := f.Bitmap(1000).Count(); n != 1 { + } else if n := f.Row(1000).Count(); n != 1 { t.Fatalf("unexpected count (reopen): %d", n) } } // Ensure a fragment can snapshot correctly. func TestFragment_Snapshot(t *testing.T) { - f := MustOpenFragment("d", "f", pilosa.ViewStandard, 0) + f := MustOpenFragment("i", "f", pilosa.ViewStandard, 0) defer f.Close() // Set and then clear bits on the fragment. @@ -96,21 +96,21 @@ func TestFragment_Snapshot(t *testing.T) { // Snapshot bitmap and verify data. if err := f.Snapshot(); err != nil { t.Fatal(err) - } else if n := f.Bitmap(1000).Count(); n != 1 { + } else if n := f.Row(1000).Count(); n != 1 { t.Fatalf("unexpected count: %d", n) } // Close and reopen the fragment & verify the data. if err := f.Reopen(); err != nil { t.Fatal(err) - } else if n := f.Bitmap(1000).Count(); n != 1 { + } else if n := f.Row(1000).Count(); n != 1 { t.Fatalf("unexpected count (reopen): %d", n) } } // Ensure a fragment can iterate over all bits in order. func TestFragment_ForEachBit(t *testing.T) { - f := MustOpenFragment("d", "f", pilosa.ViewStandard, 0) + f := MustOpenFragment("i", "f", pilosa.ViewStandard, 0) defer f.Close() // Set bits on the fragment. @@ -124,8 +124,8 @@ func TestFragment_ForEachBit(t *testing.T) { // Iterate over bits. var result [][2]uint64 - if err := f.ForEachBit(func(bitmapID, profileID uint64) error { - result = append(result, [2]uint64{bitmapID, profileID}) + if err := f.ForEachBit(func(rowID, columnID uint64) error { + result = append(result, [2]uint64{rowID, columnID}) return nil }); err != nil { t.Fatal(err) @@ -139,15 +139,15 @@ func TestFragment_ForEachBit(t *testing.T) { // Ensure a fragment can return the top n results. 
func TestFragment_Top(t *testing.T) { - f := MustOpenFragment("d", "f", pilosa.ViewStandard, 0) + f := MustOpenFragment("i", "f", pilosa.ViewStandard, 0) defer f.Close() - // Set bits on the bitmaps 100, 101, & 102. + // Set bits on the rows 100, 101, & 102. f.MustSetBits(100, 1, 3, 200) f.MustSetBits(101, 1) f.MustSetBits(102, 1, 2) - // Retrieve top bitmaps. + // Retrieve top rows. if pairs, err := f.Top(pilosa.TopOptions{N: 2}); err != nil { t.Fatal(err) } else if len(pairs) != 2 { @@ -159,21 +159,21 @@ func TestFragment_Top(t *testing.T) { } } -// Ensure a fragment can filter bitmaps when retrieving the top n bitmaps. +// Ensure a fragment can filter rows when retrieving the top n rows. func TestFragment_Top_Filter(t *testing.T) { - f := MustOpenFragment("d", "f", pilosa.ViewStandard, 0) + f := MustOpenFragment("i", "f", pilosa.ViewStandard, 0) defer f.Close() - // Set bits on the bitmaps 100, 101, & 102. + // Set bits on the rows 100, 101, & 102. f.MustSetBits(100, 1, 3, 200) f.MustSetBits(101, 1) f.MustSetBits(102, 1, 2) // Assign attributes. - f.BitmapAttrStore.SetAttrs(101, map[string]interface{}{"x": uint64(10)}) - f.BitmapAttrStore.SetAttrs(102, map[string]interface{}{"x": uint64(20)}) + f.RowAttrStore.SetAttrs(101, map[string]interface{}{"x": uint64(10)}) + f.RowAttrStore.SetAttrs(102, map[string]interface{}{"x": uint64(20)}) - // Retrieve top bitmaps. + // Retrieve top rows. if pairs, err := f.Top(pilosa.TopOptions{ N: 2, FilterField: "x", @@ -189,21 +189,21 @@ func TestFragment_Top_Filter(t *testing.T) { } } -// Ensure a fragment can return top bitmaps that intersect with an input bitmap. +// Ensure a fragment can return top rows that intersect with an input row. func TestFragment_TopN_Intersect(t *testing.T) { - f := MustOpenFragment("d", "f", pilosa.ViewStandard, 0) + f := MustOpenFragment("i", "f", pilosa.ViewStandard, 0) defer f.Close() - // Create an intersecting input bitmap. + // Create an intersecting input row. 
src := pilosa.NewBitmap(1, 2, 3) - // Set bits on various bitmaps. + // Set bits on various rows. f.MustSetBits(100, 1, 10, 11, 12) // one intersection f.MustSetBits(101, 1, 2, 3, 4) // three intersections f.MustSetBits(102, 1, 2, 4, 5, 6) // two intersections f.MustSetBits(103, 1000, 1001, 1002) // no intersection - // Retrieve top bitmaps. + // Retrieve top rows. if pairs, err := f.Top(pilosa.TopOptions{N: 3, Src: src}); err != nil { t.Fatal(err) } else if !reflect.DeepEqual(pairs, []pilosa.Pair{ @@ -215,29 +215,29 @@ func TestFragment_TopN_Intersect(t *testing.T) { } } -// Ensure a fragment can return top bitmaps that have many bits set. +// Ensure a fragment can return top rows that have many bits set. func TestFragment_TopN_Intersect_Large(t *testing.T) { if testing.Short() { t.Skip("short mode") } - f := MustOpenFragment("d", "f", pilosa.ViewStandard, 0) + f := MustOpenFragment("i", "f", pilosa.ViewStandard, 0) defer f.Close() - // Create an intersecting input bitmap. + // Create an intersecting input row. src := pilosa.NewBitmap( 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, ) - // Set bits on bitmaps 0 - 999. Higher bitmaps have higher bit counts. + // Set bits on rows 0 - 999. Higher rows have higher bit counts. for i := uint64(0); i < 1000; i++ { for j := uint64(0); j < i; j++ { f.MustSetBits(i, j) } } - // Retrieve top bitmaps. + // Retrieve top rows. if pairs, err := f.Top(pilosa.TopOptions{N: 10, Src: src}); err != nil { t.Fatal(err) } else if !reflect.DeepEqual(pairs, []pilosa.Pair{ @@ -256,18 +256,18 @@ func TestFragment_TopN_Intersect_Large(t *testing.T) { } } -// Ensure a fragment can return top bitmaps when specified by ID. -func TestFragment_TopN_BitmapIDs(t *testing.T) { - f := MustOpenFragment("d", "f", pilosa.ViewStandard, 0) +// Ensure a fragment can return top rows when specified by ID. 
+func TestFragment_TopN_IDs(t *testing.T) { + f := MustOpenFragment("i", "f", pilosa.ViewStandard, 0) defer f.Close() - // Set bits on various bitmaps. + // Set bits on various rows. f.MustSetBits(100, 1, 2, 3) f.MustSetBits(101, 4, 5, 6, 7) f.MustSetBits(102, 8, 9, 10, 11, 12) - // Retrieve top bitmaps. - if pairs, err := f.Top(pilosa.TopOptions{BitmapIDs: []uint64{100, 101, 200}}); err != nil { + // Retrieve top rows. + if pairs, err := f.Top(pilosa.TopOptions{RowIDs: []uint64{100, 101, 200}}); err != nil { t.Fatal(err) } else if !reflect.DeepEqual(pairs, []pilosa.Pair{ {ID: 101, Count: 4}, @@ -282,12 +282,12 @@ func TestFragment_TopN_CacheSize(t *testing.T) { slice := uint64(0) cacheSize := uint32(3) - // Create DB. - db := MustOpenDB() - defer db.Close() + // Create Index. + index := MustOpenIndex() + defer index.Close() // Create frame. - frame, err := db.CreateFrameIfNotExists("f", pilosa.FrameOptions{CacheType: pilosa.CacheTypeRanked, CacheSize: cacheSize}) + frame, err := index.CreateFrameIfNotExists("f", pilosa.FrameOptions{CacheType: pilosa.CacheTypeRanked, CacheSize: cacheSize}) if err != nil { t.Fatal(err) } @@ -307,16 +307,16 @@ func TestFragment_TopN_CacheSize(t *testing.T) { frag.Close() f := &Fragment{ - Fragment: frag, - BitmapAttrStore: MustOpenAttrStore(), + Fragment: frag, + RowAttrStore: MustOpenAttrStore(), } - f.Fragment.BitmapAttrStore = f.BitmapAttrStore.AttrStore + f.Fragment.RowAttrStore = f.RowAttrStore.AttrStore if err := f.Open(); err != nil { panic(err) } defer f.Close() - // Set bits on various bitmaps. + // Set bits on various rows. f.MustSetBits(100, 1, 2, 3) f.MustSetBits(101, 4, 5, 6, 7) f.MustSetBits(102, 8, 9, 10, 11, 12) @@ -332,7 +332,7 @@ func TestFragment_TopN_CacheSize(t *testing.T) { {ID: 102, Count: 5}, } - // Retrieve top bitmaps. + // Retrieve top rows. 
if pairs, err := f.Top(pilosa.TopOptions{N: 5}); err != nil { t.Fatal(err) } else if len(pairs) > int(cacheSize) { @@ -346,7 +346,7 @@ func TestFragment_TopN_CacheSize(t *testing.T) { // Ensure fragment can return a checksum for its blocks. func TestFragment_Checksum(t *testing.T) { - f := MustOpenFragment("d", "f", pilosa.ViewStandard, 0) + f := MustOpenFragment("i", "f", pilosa.ViewStandard, 0) defer f.Close() // Retrieve checksum and set bits. @@ -365,7 +365,7 @@ func TestFragment_Checksum(t *testing.T) { // Ensure fragment can return a checksum for a given block. func TestFragment_Blocks(t *testing.T) { - f := MustOpenFragment("d", "f", pilosa.ViewStandard, 0) + f := MustOpenFragment("i", "f", pilosa.ViewStandard, 0) defer f.Close() // Retrieve initial checksum. @@ -381,7 +381,7 @@ func TestFragment_Blocks(t *testing.T) { } prev = blocks - // Set bit on different bitmap. + // Set bit on different row. if _, err := f.SetBit(20, 0); err != nil { t.Fatal(err) } @@ -391,7 +391,7 @@ func TestFragment_Blocks(t *testing.T) { } prev = blocks - // Set bit on different profile. + // Set bit on different column. if _, err := f.SetBit(20, 100); err != nil { t.Fatal(err) } @@ -403,7 +403,7 @@ func TestFragment_Blocks(t *testing.T) { // Ensure fragment returns an empty checksum if no data exists for a block. func TestFragment_Blocks_Empty(t *testing.T) { - f := MustOpenFragment("d", "f", pilosa.ViewStandard, 0) + f := MustOpenFragment("i", "f", pilosa.ViewStandard, 0) defer f.Close() // Set bits on a different block. @@ -421,7 +421,7 @@ func TestFragment_Blocks_Empty(t *testing.T) { // Ensure a fragment's cache can be persisted between restarts. func TestFragment_LRUCache_Persistence(t *testing.T) { - f := MustOpenFragment("d", "f", pilosa.ViewStandard, 0) + f := MustOpenFragment("i", "f", pilosa.ViewStandard, 0) defer f.Close() // Set bits on the fragment. 
@@ -453,11 +453,11 @@ func TestFragment_LRUCache_Persistence(t *testing.T) { // Ensure a fragment's cache can be persisted between restarts. func TestFragment_RankCache_Persistence(t *testing.T) { - db := MustOpenDB() - defer db.Close() + index := MustOpenIndex() + defer index.Close() // Create frame. - frame, err := db.CreateFrameIfNotExists("f", pilosa.FrameOptions{CacheType: pilosa.CacheTypeRanked}) + frame, err := index.CreateFrameIfNotExists("f", pilosa.FrameOptions{CacheType: pilosa.CacheTypeRanked}) if err != nil { t.Fatal(err) } @@ -488,13 +488,13 @@ func TestFragment_RankCache_Persistence(t *testing.T) { t.Fatalf("unexpected cache len: %d", cache.Len()) } - // Reopen the database. - if err := db.Reopen(); err != nil { + // Reopen the index. + if err := index.Reopen(); err != nil { t.Fatal(err) } // Re-fetch fragment. - f = db.Frame("f").View(pilosa.ViewStandard).Fragment(0) + f = index.Frame("f").View(pilosa.ViewStandard).Fragment(0) // Re-verify correct cache type and size. if cache, ok := f.Cache().(*pilosa.RankCache); !ok { @@ -506,7 +506,7 @@ func TestFragment_RankCache_Persistence(t *testing.T) { // Ensure a fragment can be copied to another fragment. func TestFragment_WriteTo_ReadFrom(t *testing.T) { - f0 := MustOpenFragment("d", "f", pilosa.ViewStandard, 0) + f0 := MustOpenFragment("i", "f", pilosa.ViewStandard, 0) defer f0.Close() // Set and then clear bits on the fragment. @@ -531,7 +531,7 @@ func TestFragment_WriteTo_ReadFrom(t *testing.T) { } // Read into another fragment. - f1 := MustOpenFragment("d", "f", pilosa.ViewStandard, 0) + f1 := MustOpenFragment("i", "f", pilosa.ViewStandard, 0) if rn, err := f1.ReadFrom(&buf); err != nil { t.Fatal(err) } else if wn != rn { @@ -544,7 +544,7 @@ func TestFragment_WriteTo_ReadFrom(t *testing.T) { } // Verify data in other fragment. 
- if a := f1.Bitmap(1000).Bits(); !reflect.DeepEqual(a, []uint64{2}) { + if a := f1.Row(1000).Bits(); !reflect.DeepEqual(a, []uint64{2}) { t.Fatalf("unexpected bits: %+v", a) } @@ -553,47 +553,18 @@ func TestFragment_WriteTo_ReadFrom(t *testing.T) { t.Fatal(err) } else if n := f1.Cache().Len(); n != 1 { t.Fatalf("unexpected cache size (reopen): %d", n) - } else if a := f1.Bitmap(1000).Bits(); !reflect.DeepEqual(a, []uint64{2}) { + } else if a := f1.Row(1000).Bits(); !reflect.DeepEqual(a, []uint64{2}) { t.Fatalf("unexpected bits (reopen): %+v", a) } } -/* -func BenchmarkFragment_BlockChecksum_Fill1(b *testing.B) { benchmarkFragmentBlockChecksum(b, 0.01) } -func BenchmarkFragment_BlockChecksum_Fill10(b *testing.B) { benchmarkFragmentBlockChecksum(b, 0.10) } -func BenchmarkFragment_BlockChecksum_Fill50(b *testing.B) { benchmarkFragmentBlockChecksum(b, 0.50) } - -func benchmarkFragmentBlockChecksum(b *testing.B, fillPercent float64) { - f := MustOpenFragment("d", "f", pilosa.ViewStandard, 0) - defer f.Close() - - // Fill fragment. - bitmapIDs, profileIDs := GenerateImportFill(pilosa.HashBlockSize, fillPercent) - if err := f.Import(bitmapIDs, profileIDs); err != nil { - b.Fatal(err) - } - - b.ResetTimer() - b.ReportAllocs() - - // Calculate block checksum. - for i := 0; i < b.N; i++ { - f.InvalidateChecksums() - - if chksum := f.BlockChecksum(0); chksum == nil { - b.Fatal("expected checksum") - } - } -} -*/ - func BenchmarkFragment_Blocks(b *testing.B) { if *FragmentPath == "" { b.Skip("no fragment specified") } // Open the fragment specified by the path. 
- f := pilosa.NewFragment(*FragmentPath, "d", "f", pilosa.ViewStandard, 0) + f := pilosa.NewFragment(*FragmentPath, "i", "f", pilosa.ViewStandard, 0) if err := f.Open(); err != nil { b.Fatal(err) } @@ -609,7 +580,7 @@ func BenchmarkFragment_Blocks(b *testing.B) { } func BenchmarkFragment_IntersectionCount(b *testing.B) { - f := MustOpenFragment("d", "f", pilosa.ViewStandard, 0) + f := MustOpenFragment("i", "f", pilosa.ViewStandard, 0) defer f.Close() f.MaxOpN = math.MaxInt32 @@ -633,7 +604,7 @@ func BenchmarkFragment_IntersectionCount(b *testing.B) { // Start benchmark b.ResetTimer() for i := 0; i < b.N; i++ { - if n := f.Bitmap(1).IntersectionCount(f.Bitmap(2)); n == 0 { + if n := f.Row(1).IntersectionCount(f.Row(2)); n == 0 { b.Fatalf("unexpected count: %d", n) } } @@ -642,11 +613,11 @@ func BenchmarkFragment_IntersectionCount(b *testing.B) { // Fragment is a test wrapper for pilosa.Fragment. type Fragment struct { *pilosa.Fragment - BitmapAttrStore *AttrStore + RowAttrStore *AttrStore } // NewFragment returns a new instance of Fragment with a temporary path. -func NewFragment(db, frame, view string, slice uint64) *Fragment { +func NewFragment(index, frame, view string, slice uint64) *Fragment { file, err := ioutil.TempFile("", "pilosa-fragment-") if err != nil { panic(err) @@ -654,16 +625,16 @@ func NewFragment(db, frame, view string, slice uint64) *Fragment { file.Close() f := &Fragment{ - Fragment: pilosa.NewFragment(file.Name(), db, frame, view, slice), - BitmapAttrStore: MustOpenAttrStore(), + Fragment: pilosa.NewFragment(file.Name(), index, frame, view, slice), + RowAttrStore: MustOpenAttrStore(), } - f.Fragment.BitmapAttrStore = f.BitmapAttrStore.AttrStore + f.Fragment.RowAttrStore = f.RowAttrStore.AttrStore return f } // MustOpenFragment creates and opens an fragment at a temporary path. Panic on error. 
-func MustOpenFragment(db, frame, view string, slice uint64) *Fragment { - f := NewFragment(db, frame, view, slice) +func MustOpenFragment(index, frame, view string, slice uint64) *Fragment { + f := NewFragment(index, frame, view, slice) if err := f.Open(); err != nil { panic(err) } @@ -674,7 +645,7 @@ func MustOpenFragment(db, frame, view string, slice uint64) *Fragment { func (f *Fragment) Close() error { defer os.Remove(f.Path()) defer os.Remove(f.CachePath()) - defer f.BitmapAttrStore.Close() + defer f.RowAttrStore.Close() return f.Fragment.Close() } @@ -685,76 +656,76 @@ func (f *Fragment) Reopen() error { return err } - f.Fragment = pilosa.NewFragment(path, f.DB(), f.Frame(), f.View(), f.Slice()) - f.Fragment.BitmapAttrStore = f.BitmapAttrStore.AttrStore + f.Fragment = pilosa.NewFragment(path, f.Index(), f.Frame(), f.View(), f.Slice()) + f.Fragment.RowAttrStore = f.RowAttrStore.AttrStore if err := f.Open(); err != nil { return err } return nil } -// MustSetBits sets bits on a bitmap. Panic on error. +// MustSetBits sets bits on a row. Panic on error. // This function does not accept a timestamp or quantum. -func (f *Fragment) MustSetBits(bitmapID uint64, profileIDs ...uint64) { - for _, profileID := range profileIDs { - if _, err := f.SetBit(bitmapID, profileID); err != nil { +func (f *Fragment) MustSetBits(rowID uint64, columnIDs ...uint64) { + for _, columnID := range columnIDs { + if _, err := f.SetBit(rowID, columnID); err != nil { panic(err) } } } -// MustClearBits clears bits on a bitmap. Panic on error. -func (f *Fragment) MustClearBits(bitmapID uint64, profileIDs ...uint64) { - for _, profileID := range profileIDs { - if _, err := f.ClearBit(bitmapID, profileID); err != nil { +// MustClearBits clears bits on a row. Panic on error. 
+func (f *Fragment) MustClearBits(rowID uint64, columnIDs ...uint64) { + for _, columnID := range columnIDs { + if _, err := f.ClearBit(rowID, columnID); err != nil { panic(err) } } } -// BitmapAttrStore provides simple storage for attributes. -type BitmapAttrStore struct { +// RowAttrStore provides simple storage for attributes. +type RowAttrStore struct { attrs map[uint64]map[string]interface{} } -// NewBitmapAttrStore returns a new instance of BitmapAttrStore. -func NewBitmapAttrStore() *BitmapAttrStore { - return &BitmapAttrStore{ +// NewRowAttrStore returns a new instance of RowAttrStore. +func NewRowAttrStore() *RowAttrStore { + return &RowAttrStore{ attrs: make(map[uint64]map[string]interface{}), } } -// BitmapAttrs returns the attributes set to a bitmap id. -func (s *BitmapAttrStore) BitmapAttrs(id uint64) (map[string]interface{}, error) { +// RowAttrs returns the attributes set to a row id. +func (s *RowAttrStore) RowAttrs(id uint64) (map[string]interface{}, error) { return s.attrs[id], nil } -// SetBitmapAttrs assigns a set of attributes to a bitmap id. -func (s *BitmapAttrStore) SetBitmapAttrs(id uint64, m map[string]interface{}) { +// SetRowAttrs assigns a set of attributes to a row id. +func (s *RowAttrStore) SetRowAttrs(id uint64, m map[string]interface{}) { s.attrs[id] = m } // GenerateImportFill generates a set of bits pairs that evenly fill a fragment chunk. 
-func GenerateImportFill(bitmapN int, pct float64) (bitmapIDs, profileIDs []uint64) { +func GenerateImportFill(rowN int, pct float64) (rowIDs, columnIDs []uint64) { ipct := int(pct * 100) - for i := 0; i < SliceWidth*bitmapN; i++ { + for i := 0; i < SliceWidth*rowN; i++ { if i%100 >= ipct { continue } - bitmapIDs = append(bitmapIDs, uint64(i%SliceWidth)) - profileIDs = append(profileIDs, uint64(i/SliceWidth)) + rowIDs = append(rowIDs, uint64(i%SliceWidth)) + columnIDs = append(columnIDs, uint64(i/SliceWidth)) } return } func TestFragment_Tanimoto(t *testing.T) { - f := MustOpenFragment("d", "f", pilosa.ViewStandard, 0) + f := MustOpenFragment("i", "f", pilosa.ViewStandard, 0) defer f.Close() src := pilosa.NewBitmap(1, 2, 3) - // Set bits on the bitmaps 100, 101, & 102. + // Set bits on the rows 100, 101, & 102. f.MustSetBits(100, 1, 3, 2, 200) f.MustSetBits(101, 1, 3) f.MustSetBits(102, 1, 2, 10, 12) @@ -771,12 +742,12 @@ func TestFragment_Tanimoto(t *testing.T) { } func TestFragment_Zero_Tanimoto(t *testing.T) { - f := MustOpenFragment("d", "f", pilosa.ViewStandard, 0) + f := MustOpenFragment("i", "f", pilosa.ViewStandard, 0) defer f.Close() src := pilosa.NewBitmap(1, 2, 3) - // Set bits on the bitmaps 100, 101, & 102. + // Set bits on the rows 100, 101, & 102. f.MustSetBits(100, 1, 3, 2, 200) f.MustSetBits(101, 1, 3) f.MustSetBits(102, 1, 2, 10, 12) diff --git a/frame.go b/frame.go index d173d9a07..f15581763 100644 --- a/frame.go +++ b/frame.go @@ -29,14 +29,14 @@ const ( type Frame struct { mu sync.Mutex path string - db string + index string name string timeQuantum TimeQuantum views map[string]*View - // Bitmap attribute storage and cache - bitmapAttrStore *AttrStore + // Row attribute storage and cache + rowAttrStore *AttrStore broadcaster Broadcaster Stats StatsClient @@ -53,19 +53,19 @@ type Frame struct { } // NewFrame returns a new instance of frame. 
-func NewFrame(path, db, name string) (*Frame, error) { +func NewFrame(path, index, name string) (*Frame, error) { err := ValidateName(name) if err != nil { return nil, err } return &Frame{ - path: path, - db: db, - name: name, + path: path, + index: index, + name: name, - views: make(map[string]*View), - bitmapAttrStore: NewAttrStore(filepath.Join(path, ".data")), + views: make(map[string]*View), + rowAttrStore: NewAttrStore(filepath.Join(path, ".data")), Stats: NopStatsClient, @@ -81,14 +81,14 @@ func NewFrame(path, db, name string) (*Frame, error) { // Name returns the name the frame was initialized with. func (f *Frame) Name() string { return f.name } -// DB returns the database name the frame was initialized with. -func (f *Frame) DB() string { return f.db } +// Index returns the index name the frame was initialized with. +func (f *Frame) Index() string { return f.index } // Path returns the path the frame was initialized with. func (f *Frame) Path() string { return f.path } -// BitmapAttrStore returns the attribute storage. -func (f *Frame) BitmapAttrStore() *AttrStore { return f.bitmapAttrStore } +// RowAttrStore returns the attribute storage. +func (f *Frame) RowAttrStore() *AttrStore { return f.rowAttrStore } // MaxSlice returns the max slice in the frame. func (f *Frame) MaxSlice() uint64 { @@ -215,7 +215,7 @@ func (f *Frame) Open() error { return err } - if err := f.bitmapAttrStore.Open(); err != nil { + if err := f.rowAttrStore.Open(); err != nil { return err } @@ -253,7 +253,7 @@ func (f *Frame) openViews() error { if err := view.Open(); err != nil { return fmt.Errorf("open view: view=%s, err=%s", view.Name(), err) } - view.BitmapAttrStore = f.bitmapAttrStore + view.RowAttrStore = f.rowAttrStore f.views[view.Name()] = view f.Stats.Count("maxSlice", 1) @@ -326,8 +326,8 @@ func (f *Frame) Close() error { defer f.mu.Unlock() // Close the attribute store. 
- if f.bitmapAttrStore != nil { - _ = f.bitmapAttrStore.Close() + if f.rowAttrStore != nil { + _ = f.rowAttrStore.Close() } // Close all views. @@ -411,17 +411,17 @@ func (f *Frame) CreateViewIfNotExists(name string) (*View, error) { if err := view.Open(); err != nil { return nil, err } - view.BitmapAttrStore = f.bitmapAttrStore + view.RowAttrStore = f.rowAttrStore f.views[view.Name()] = view return view, nil } func (f *Frame) newView(path, name string) *View { - view := NewView(path, f.db, f.name, name, f.cacheSize) + view := NewView(path, f.index, f.name, name, f.cacheSize) view.cacheType = f.cacheType view.LogOutput = f.LogOutput - view.BitmapAttrStore = f.bitmapAttrStore + view.RowAttrStore = f.rowAttrStore view.stats = f.Stats.WithTags(fmt.Sprintf("slice:%s", name)) return view } @@ -511,17 +511,17 @@ func (f *Frame) ClearBit(name string, rowID, colID uint64, t *time.Time) (change } // Import bulk imports data. -func (f *Frame) Import(bitmapIDs, profileIDs []uint64, timestamps []*time.Time) error { +func (f *Frame) Import(rowIDs, columnIDs []uint64, timestamps []*time.Time) error { // Determine quantum if timestamps are set. q := f.TimeQuantum() if hasTime(timestamps) && q == "" { - return errors.New("time quantum not set in either database or frame") + return errors.New("time quantum not set in either index or frame") } // Split import data by fragment. dataByFragment := make(map[importKey]importData) - for i := range bitmapIDs { - bitmapID, profileID, timestamp := bitmapIDs[i], profileIDs[i], timestamps[i] + for i := range rowIDs { + rowID, columnID, timestamp := rowIDs[i], columnIDs[i], timestamps[i] var standard, inverse []string if timestamp == nil { @@ -534,20 +534,20 @@ func (f *Frame) Import(bitmapIDs, profileIDs []uint64, timestamps []*time.Time) // Attach bit to each standard view. 
for _, name := range standard { - key := importKey{View: name, Slice: profileID / SliceWidth} + key := importKey{View: name, Slice: columnID / SliceWidth} data := dataByFragment[key] - data.BitmapIDs = append(data.BitmapIDs, bitmapID) - data.ProfileIDs = append(data.ProfileIDs, profileID) + data.RowIDs = append(data.RowIDs, rowID) + data.ColumnIDs = append(data.ColumnIDs, columnID) dataByFragment[key] = data } if f.inverseEnabled { // Attach reversed bits to each inverse view. for _, name := range inverse { - key := importKey{View: name, Slice: bitmapID / SliceWidth} + key := importKey{View: name, Slice: rowID / SliceWidth} data := dataByFragment[key] - data.BitmapIDs = append(data.BitmapIDs, profileID) // reversed - data.ProfileIDs = append(data.ProfileIDs, bitmapID) // reversed + data.RowIDs = append(data.RowIDs, columnID) // reversed + data.ColumnIDs = append(data.ColumnIDs, rowID) // reversed dataByFragment[key] = data } } @@ -563,8 +563,8 @@ func (f *Frame) Import(bitmapIDs, profileIDs []uint64, timestamps []*time.Time) // Re-sort data for inverse views. if IsInverseView(key.View) { sort.Sort(importBitSet{ - bitmapIDs: data.BitmapIDs, - profileIDs: data.ProfileIDs, + rowIDs: data.RowIDs, + columnIDs: data.ColumnIDs, }) } @@ -578,7 +578,7 @@ func (f *Frame) Import(bitmapIDs, profileIDs []uint64, timestamps []*time.Time) return err } - if err := frag.Import(data.BitmapIDs, data.ProfileIDs); err != nil { + if err := frag.Import(data.RowIDs, data.ColumnIDs); err != nil { return err } } @@ -650,15 +650,15 @@ func (o *FrameOptions) Encode() *internal.FrameMeta { // importBitSet represents slices of row and column ids. // This is used to sort data during import. 
type importBitSet struct { - bitmapIDs, profileIDs []uint64 + rowIDs, columnIDs []uint64 } func (p importBitSet) Swap(i, j int) { - p.bitmapIDs[i], p.bitmapIDs[j] = p.bitmapIDs[j], p.bitmapIDs[i] - p.profileIDs[i], p.profileIDs[j] = p.profileIDs[j], p.profileIDs[i] + p.rowIDs[i], p.rowIDs[j] = p.rowIDs[j], p.rowIDs[i] + p.columnIDs[i], p.columnIDs[j] = p.columnIDs[j], p.columnIDs[i] } -func (p importBitSet) Len() int { return len(p.bitmapIDs) } -func (p importBitSet) Less(i, j int) bool { return p.bitmapIDs[i] < p.bitmapIDs[j] } +func (p importBitSet) Len() int { return len(p.rowIDs) } +func (p importBitSet) Less(i, j int) bool { return p.rowIDs[i] < p.rowIDs[j] } // Cache types. const ( diff --git a/frame_test.go b/frame_test.go index 4814ca67e..669ffcc40 100644 --- a/frame_test.go +++ b/frame_test.go @@ -60,7 +60,7 @@ func TestFrame_NameRestriction(t *testing.T) { if err != nil { panic(err) } - frame, err := pilosa.NewFrame(path, "d", ".meta") + frame, err := pilosa.NewFrame(path, "i", ".meta") if frame != nil { t.Fatalf("unexpected frame name %s", err) } @@ -77,7 +77,7 @@ func NewFrame() *Frame { if err != nil { panic(err) } - frame, err := pilosa.NewFrame(path, "d", "f") + frame, err := pilosa.NewFrame(path, "i", "f") if err != nil { panic(err) } @@ -99,15 +99,15 @@ func (f *Frame) Close() error { return f.Frame.Close() } -// Reopen closes the database and reopens it. +// Reopen closes the index and reopens it. func (f *Frame) Reopen() error { var err error if err := f.Frame.Close(); err != nil { return err } - path, db, name := f.Path(), f.DB(), f.Name() - f.Frame, err = pilosa.NewFrame(path, db, name) + path, index, name := f.Path(), f.Index(), f.Name() + f.Frame, err = pilosa.NewFrame(path, index, name) if err != nil { return err } @@ -119,8 +119,8 @@ func (f *Frame) Reopen() error { } // MustSetBit sets a bit on the frame. Panic on error. 
-func (f *Frame) MustSetBit(view string, bitmapID, profileID uint64, t *time.Time) (changed bool) { - changed, err := f.SetBit(view, bitmapID, profileID, t) +func (f *Frame) MustSetBit(view string, rowID, columnID uint64, t *time.Time) (changed bool) { + changed, err := f.SetBit(view, rowID, columnID, t) if err != nil { panic(err) } diff --git a/gossip/gossip.go b/gossip/gossip.go index 513bd03d1..c3ded776a 100644 --- a/gossip/gossip.go +++ b/gossip/gossip.go @@ -23,8 +23,8 @@ type GossipNodeSet struct { broadcasts *memberlist.TransmitLimitedQueue - stateHandler pilosa.StateHandler - config *GossipConfig + statusHandler pilosa.StatusHandler + config *GossipConfig // The writer for any logging. LogOutput io.Writer @@ -81,7 +81,7 @@ type GossipConfig struct { } // NewGossipNodeSet returns a new instance of GossipNodeSet. -func NewGossipNodeSet(name string, gossipHost string, gossipPort int, gossipSeed string, sh pilosa.StateHandler) *GossipNodeSet { +func NewGossipNodeSet(name string, gossipHost string, gossipPort int, gossipSeed string, sh pilosa.StatusHandler) *GossipNodeSet { g := &GossipNodeSet{ LogOutput: os.Stderr, } @@ -98,7 +98,7 @@ func NewGossipNodeSet(name string, gossipHost string, gossipPort int, gossipSeed g.config.memberlistConfig.AdvertisePort = gossipPort g.config.memberlistConfig.Delegate = g - g.stateHandler = sh + g.statusHandler = sh return g } @@ -168,7 +168,7 @@ func (g *GossipNodeSet) GetBroadcasts(overhead, limit int) [][]byte { } func (g *GossipNodeSet) LocalState(join bool) []byte { - pb, err := g.stateHandler.LocalState() + pb, err := g.statusHandler.LocalStatus() if err != nil { g.logger().Printf("error getting local state, err=%s", err) return []byte{} @@ -185,12 +185,12 @@ func (g *GossipNodeSet) LocalState(join bool) []byte { func (g *GossipNodeSet) MergeRemoteState(buf []byte, join bool) { // Unmarshal nodestate data. 
- var pb internal.NodeState + var pb internal.NodeStatus if err := proto.Unmarshal(buf, &pb); err != nil { g.logger().Printf("error unmarshalling nodestate data, err=%s", err) return } - err := g.stateHandler.HandleRemoteState(&pb) + err := g.statusHandler.HandleRemoteStatus(&pb) if err != nil { g.logger().Printf("merge state error: %s", err) } diff --git a/handler.go b/handler.go index 419d3353d..71f85b0c5 100644 --- a/handler.go +++ b/handler.go @@ -17,18 +17,19 @@ import ( "strings" "time" + "reflect" + "github.com/gogo/protobuf/proto" "github.com/gorilla/mux" "github.com/pilosa/pilosa/internal" "github.com/pilosa/pilosa/pql" - "reflect" ) // Handler represents an HTTP handler. type Handler struct { - Index *Index + Holder *Holder Broadcaster Broadcaster - ServerHandler StateHandler + StatusHandler StatusHandler // Local hostname & cluster configuration. Host string @@ -38,7 +39,7 @@ type Handler struct { // The execution engine for running queries. Executor interface { - Execute(context context.Context, db string, query *pql.Query, slices []uint64, opt *ExecOptions) ([]interface{}, error) + Execute(context context.Context, index string, query *pql.Query, slices []uint64, opt *ExecOptions) ([]interface{}, error) } // The version to report on the /version endpoint. @@ -59,20 +60,20 @@ func NewHandler() *Handler { func NewRouter(handler *Handler) *mux.Router { router := mux.NewRouter() - router.HandleFunc("/db", handler.handleGetDBs).Methods("GET") - router.HandleFunc("/db/{db}", handler.handleGetDB).Methods("GET") - router.HandleFunc("/db/{db}", handler.handlePostDB).Methods("POST") - router.HandleFunc("/db/{db}", handler.handleDeleteDB).Methods("DELETE") - router.HandleFunc("/db/{db}/attr/diff", handler.handlePostDBAttrDiff).Methods("POST") - //router.HandleFunc("/db/{db}/frame", handler.handleGetFrames).Methods("GET") // Not implemented. 
- router.HandleFunc("/db/{db}/frame/{frame}", handler.handlePostFrame).Methods("POST") - router.HandleFunc("/db/{db}/frame/{frame}", handler.handleDeleteFrame).Methods("DELETE") - router.HandleFunc("/db/{db}/query", handler.handlePostQuery).Methods("POST") - router.HandleFunc("/db/{db}/frame/{frame}/attr/diff", handler.handlePostFrameAttrDiff).Methods("POST") - router.HandleFunc("/db/{db}/frame/{frame}/restore", handler.handlePostFrameRestore).Methods("POST") - router.HandleFunc("/db/{db}/frame/{frame}/time-quantum", handler.handlePatchFrameTimeQuantum).Methods("PATCH") - router.HandleFunc("/db/{db}/frame/{frame}/views", handler.handleGetFrameViews).Methods("GET") - router.HandleFunc("/db/{db}/time-quantum", handler.handlePatchDBTimeQuantum).Methods("PATCH") + router.HandleFunc("/index", handler.handleGetIndexes).Methods("GET") + router.HandleFunc("/index/{index}", handler.handleGetIndex).Methods("GET") + router.HandleFunc("/index/{index}", handler.handlePostIndex).Methods("POST") + router.HandleFunc("/index/{index}", handler.handleDeleteIndex).Methods("DELETE") + router.HandleFunc("/index/{index}/attr/diff", handler.handlePostIndexAttrDiff).Methods("POST") + //router.HandleFunc("/index/{index}/frame", handler.handleGetFrames).Methods("GET") // Not implemented. 
+ router.HandleFunc("/index/{index}/frame/{frame}", handler.handlePostFrame).Methods("POST") + router.HandleFunc("/index/{index}/frame/{frame}", handler.handleDeleteFrame).Methods("DELETE") + router.HandleFunc("/index/{index}/query", handler.handlePostQuery).Methods("POST") + router.HandleFunc("/index/{index}/frame/{frame}/attr/diff", handler.handlePostFrameAttrDiff).Methods("POST") + router.HandleFunc("/index/{index}/frame/{frame}/restore", handler.handlePostFrameRestore).Methods("POST") + router.HandleFunc("/index/{index}/frame/{frame}/time-quantum", handler.handlePatchFrameTimeQuantum).Methods("PATCH") + router.HandleFunc("/index/{index}/frame/{frame}/views", handler.handleGetFrameViews).Methods("GET") + router.HandleFunc("/index/{index}/time-quantum", handler.handlePatchIndexTimeQuantum).Methods("PATCH") router.PathPrefix("/debug/pprof/").Handler(http.DefaultServeMux).Methods("GET") router.HandleFunc("/debug/vars", handler.handleExpvar).Methods("GET") router.HandleFunc("/export", handler.handleGetExport).Methods("GET") @@ -92,7 +93,7 @@ func NewRouter(handler *Handler) *mux.Router { // Ideally this would be automatic, as described in this (wontfix) ticket: // https://github.com/gorilla/mux/issues/6 // For now we just do it for the most commonly used handler, /query - router.HandleFunc("/db/{db}/query", handler.methodNotAllowedHandler).Methods("GET") + router.HandleFunc("/index/{index}/query", handler.methodNotAllowedHandler).Methods("GET") return router } @@ -109,39 +110,36 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // handleGetSchema handles GET /schema requests. 
func (h *Handler) handleGetSchema(w http.ResponseWriter, r *http.Request) { if err := json.NewEncoder(w).Encode(getSchemaResponse{ - DBs: h.Index.Schema(), + Indexes: h.Holder.Schema(), }); err != nil { h.logger().Printf("write schema response error: %s", err) } } func (h *Handler) handleGetStatus(w http.ResponseWriter, r *http.Request) { - // Compute my local state - fmt.Println("Call interface") - h.ServerHandler.LocalState() - + status, err := h.StatusHandler.ClusterStatus() + if err != nil { + h.logger().Printf("cluster status error: %s", err) + return + } if err := json.NewEncoder(w).Encode(getStatusResponse{ - Health: h.Cluster.NodeState, - Version: h.Version, - Replicas: h.Cluster.ReplicaN, + Status: status, }); err != nil { h.logger().Printf("Node State Error: %s", err) } } type getSchemaResponse struct { - DBs []*DBInfo `json:"dbs"` + Indexes []*IndexInfo `json:"indexes"` } type getStatusResponse struct { - Health map[string]*internal.NodeState `json:"health"` - Version string `json:"version"` - Replicas int + Status proto.Message `json:"status"` } // handlePostQuery handles /query requests. func (h *Handler) handlePostQuery(w http.ResponseWriter, r *http.Request) { - dbName := mux.Vars(r)["db"] + indexName := mux.Vars(r)["index"] // Parse incoming request. req, err := h.readQueryRequest(r) @@ -165,29 +163,29 @@ func (h *Handler) handlePostQuery(w http.ResponseWriter, r *http.Request) { } // Execute the query. - results, err := h.Executor.Execute(r.Context(), dbName, q, req.Slices, opt) + results, err := h.Executor.Execute(r.Context(), indexName, q, req.Slices, opt) resp := &QueryResponse{Results: results, Err: err} - // Fill profile attributes if requested. - if req.Profiles { - // Consolidate all profile ids across all calls. - var profileIDs []uint64 + // Fill column attributes if requested. + if req.ColumnAttrs { + // Consolidate all column ids across all calls. 
+ var columnIDs []uint64 for _, result := range results { bm, ok := result.(*Bitmap) if !ok { continue } - profileIDs = uint64Slice(profileIDs).merge(bm.Bits()) + columnIDs = uint64Slice(columnIDs).merge(bm.Bits()) } - // Retrieve profile attributes across all calls. - profiles, err := h.readProfiles(h.Index.DB(dbName), profileIDs) + // Retrieve column attributes across all calls. + columnAttrSets, err := h.readColumnAttrSets(h.Holder.Index(indexName), columnIDs) if err != nil { w.WriteHeader(http.StatusInternalServerError) h.writeQueryResponse(w, r, &QueryResponse{Err: err}) return } - resp.Profiles = profiles + resp.ColumnAttrSets = columnAttrSets } // Set appropriate status code, if there is an error. @@ -204,9 +202,9 @@ func (h *Handler) handlePostQuery(w http.ResponseWriter, r *http.Request) { func (h *Handler) handleGetSliceMax(w http.ResponseWriter, r *http.Request) { var ms map[string]uint64 if inverse, _ := strconv.ParseBool(r.URL.Query().Get("inverse")); inverse { - ms = h.Index.MaxInverseSlices() + ms = h.Holder.MaxInverseSlices() } else { - ms = h.Index.MaxSlices() + ms = h.Holder.MaxSlices() } if strings.Contains(r.Header.Get("Accept"), "application/x-protobuf") { pb := &internal.MaxSlicesResponse{ @@ -227,40 +225,40 @@ type sliceMaxResponse struct { MaxSlices map[string]uint64 `json:"maxSlices"` } -// handleGetDBs handles GET /db request. -func (h *Handler) handleGetDBs(w http.ResponseWriter, r *http.Request) { +// handleGetIndexes handles GET /index request. +func (h *Handler) handleGetIndexes(w http.ResponseWriter, r *http.Request) { h.handleGetSchema(w, r) } -// handleGetDB handles GET /db/ requests. -func (h *Handler) handleGetDB(w http.ResponseWriter, r *http.Request) { - dbName := mux.Vars(r)["db"] - db := h.Index.DB(dbName) - if db == nil { - http.Error(w, ErrDatabaseNotFound.Error(), http.StatusNotFound) +// handleGetIndex handles GET /index/ requests. 
+func (h *Handler) handleGetIndex(w http.ResponseWriter, r *http.Request) { + indexName := mux.Vars(r)["index"] + index := h.Holder.Index(indexName) + if index == nil { + http.Error(w, ErrIndexNotFound.Error(), http.StatusNotFound) return } - if err := json.NewEncoder(w).Encode(getDBResponse{ - map[string]string{"name": db.Name()}, + if err := json.NewEncoder(w).Encode(getIndexResponse{ + map[string]string{"name": index.Name()}, }); err != nil { h.logger().Printf("write response error: %s", err) } } -type getDBResponse struct { - DB map[string]string `json:"db"` +type getIndexResponse struct { + Index map[string]string `json:"index"` } -type postDBRequest struct { - Options DBOptions `json:"options"` +type postIndexRequest struct { + Options IndexOptions `json:"options"` } -//_postDBRequest is necessary to avoid recursion while decoding. -type _postDBRequest postDBRequest +//_postIndexRequest is necessary to avoid recursion while decoding. +type _postIndexRequest postIndexRequest -// Custom Unmarshal JSON to validate request body when creating a new database -func (p *postDBRequest) UnmarshalJSON(b []byte) error { +// Custom Unmarshal JSON to validate request body when creating a new index. +func (p *postIndexRequest) UnmarshalJSON(b []byte) error { // m is an overflow map used to capture additional, unexpected keys. m := make(map[string]interface{}) @@ -268,13 +266,13 @@ func (p *postDBRequest) UnmarshalJSON(b []byte) error { return err } - validDBOptions := getValidOptions(DBOptions{}) - err := validateOptions(m, validDBOptions) + validIndexOptions := getValidOptions(IndexOptions{}) + err := validateOptions(m, validIndexOptions) if err != nil { return err } // Unmarshal expected values. 
- var _p _postDBRequest + var _p _postIndexRequest if err := json.Unmarshal(b, &_p); err != nil { return err } @@ -285,7 +283,7 @@ func (p *postDBRequest) UnmarshalJSON(b []byte) error { } // Raise errors for any unknown key -func validateOptions(data map[string]interface{}, validDBOptions []string) error { +func validateOptions(data map[string]interface{}, validIndexOptions []string) error { for k, v := range data { switch k { case "options": @@ -294,7 +292,7 @@ func validateOptions(data map[string]interface{}, validDBOptions []string) error return errors.New("options is not map[string]interface{}") } for kk, vv := range options { - if !foundItem(validDBOptions, kk) { + if !foundItem(validIndexOptions, kk) { return fmt.Errorf("Unknown key: %v:%v", kk, vv) } } @@ -314,53 +312,53 @@ func foundItem(items []string, item string) bool { return false } -type postDBResponse struct{} +type postIndexResponse struct{} -// handleDeleteDB handles DELETE /db request. -func (h *Handler) handleDeleteDB(w http.ResponseWriter, r *http.Request) { - dbName := mux.Vars(r)["db"] +// handleDeleteIndex handles DELETE /index request. +func (h *Handler) handleDeleteIndex(w http.ResponseWriter, r *http.Request) { + indexName := mux.Vars(r)["index"] - // Delete database from the index. - if err := h.Index.DeleteDB(dbName); err != nil { + // Delete index from the holder. + if err := h.Holder.DeleteIndex(indexName); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } - // Send the delete database message to all nodes. + // Send the delete index message to all nodes. err := h.Broadcaster.SendSync( - &internal.DeleteDBMessage{ - DB: dbName, + &internal.DeleteIndexMessage{ + Index: indexName, }) if err != nil { - h.logger().Printf("problem sending DeleteDB message: %s", err) + h.logger().Printf("problem sending DeleteIndex message: %s", err) } // Encode response. 
- if err := json.NewEncoder(w).Encode(deleteDBResponse{}); err != nil { + if err := json.NewEncoder(w).Encode(deleteIndexResponse{}); err != nil { h.logger().Printf("response encoding error: %s", err) } } -type deleteDBResponse struct{} +type deleteIndexResponse struct{} -// handlePostDB handles POST /db request. -func (h *Handler) handlePostDB(w http.ResponseWriter, r *http.Request) { - dbName := mux.Vars(r)["db"] +// handlePostIndex handles POST /index request. +func (h *Handler) handlePostIndex(w http.ResponseWriter, r *http.Request) { + indexName := mux.Vars(r)["index"] // Decode request. - var req postDBRequest + var req postIndexRequest err := json.NewDecoder(r.Body).Decode(&req) if err == io.EOF { - // If no data was provided (EOF), we still create the database + // If no data was provided (EOF), we still create the index // with default values. } else if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } - // Create database. - _, err = h.Index.CreateDB(dbName, req.Options) - if err == ErrDatabaseExists { + // Create index. + _, err = h.Holder.CreateIndex(indexName, req.Options) + if err == ErrIndexExists { http.Error(w, err.Error(), http.StatusConflict) return } else if err != nil { @@ -368,28 +366,28 @@ func (h *Handler) handlePostDB(w http.ResponseWriter, r *http.Request) { return } - // Send the create database message to all nodes. + // Send the create index message to all nodes. err = h.Broadcaster.SendSync( - &internal.CreateDBMessage{ - DB: dbName, - Meta: req.Options.Encode(), + &internal.CreateIndexMessage{ + Index: indexName, + Meta: req.Options.Encode(), }) if err != nil { - h.logger().Printf("problem sending CreateDB message: %s", err) + h.logger().Printf("problem sending CreateIndex message: %s", err) } // Encode response. 
- if err := json.NewEncoder(w).Encode(postDBResponse{}); err != nil { + if err := json.NewEncoder(w).Encode(postIndexResponse{}); err != nil { h.logger().Printf("response encoding error: %s", err) } } -// handlePatchDBTimeQuantum handles PATCH /db/time_quantum request. -func (h *Handler) handlePatchDBTimeQuantum(w http.ResponseWriter, r *http.Request) { - dbName := mux.Vars(r)["db"] +// handlePatchIndexTimeQuantum handles PATCH /index/time_quantum request. +func (h *Handler) handlePatchIndexTimeQuantum(w http.ResponseWriter, r *http.Request) { + indexName := mux.Vars(r)["index"] // Decode request. - var req patchDBTimeQuantumRequest + var req patchIndexTimeQuantumRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return @@ -402,51 +400,51 @@ func (h *Handler) handlePatchDBTimeQuantum(w http.ResponseWriter, r *http.Reques return } - // Retrieve database by name. - database := h.Index.DB(dbName) - if database == nil { - http.Error(w, ErrDatabaseNotFound.Error(), http.StatusNotFound) + // Retrieve index by name. + index := h.Holder.Index(indexName) + if index == nil { + http.Error(w, ErrIndexNotFound.Error(), http.StatusNotFound) return } - // Set default time quantum on database. - if err := database.SetTimeQuantum(tq); err != nil { + // Set default time quantum on index. + if err := index.SetTimeQuantum(tq); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } // Encode response. - if err := json.NewEncoder(w).Encode(patchDBTimeQuantumResponse{}); err != nil { + if err := json.NewEncoder(w).Encode(patchIndexTimeQuantumResponse{}); err != nil { h.logger().Printf("response encoding error: %s", err) } } -type patchDBTimeQuantumRequest struct { +type patchIndexTimeQuantumRequest struct { TimeQuantum string `json:"timeQuantum"` } -type patchDBTimeQuantumResponse struct{} +type patchIndexTimeQuantumResponse struct{} -// handlePostDBAttrDiff handles POST /db/attr/diff requests. 
-func (h *Handler) handlePostDBAttrDiff(w http.ResponseWriter, r *http.Request) { - dbName := mux.Vars(r)["db"] +// handlePostIndexAttrDiff handles POST /index/attr/diff requests. +func (h *Handler) handlePostIndexAttrDiff(w http.ResponseWriter, r *http.Request) { + indexName := mux.Vars(r)["index"] // Decode request. - var req postDBAttrDiffRequest + var req postIndexAttrDiffRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } - // Retrieve database from index. - db := h.Index.DB(dbName) - if db == nil { - http.Error(w, ErrDatabaseNotFound.Error(), http.StatusNotFound) + // Retrieve index from holder. + index := h.Holder.Index(indexName) + if index == nil { + http.Error(w, ErrIndexNotFound.Error(), http.StatusNotFound) return } // Retrieve local blocks. - blks, err := db.ProfileAttrStore().Blocks() + blks, err := index.ColumnAttrStore().Blocks() if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -456,37 +454,37 @@ func (h *Handler) handlePostDBAttrDiff(w http.ResponseWriter, r *http.Request) { attrs := make(map[uint64]map[string]interface{}) for _, blockID := range AttrBlocks(blks).Diff(req.Blocks) { // Retrieve block data. - m, err := db.ProfileAttrStore().BlockData(blockID) + m, err := index.ColumnAttrStore().BlockData(blockID) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } - // Copy to database-wide struct. + // Copy to index-wide struct. for k, v := range m { attrs[k] = v } } // Encode response. 
- if err := json.NewEncoder(w).Encode(postDBAttrDiffResponse{ + if err := json.NewEncoder(w).Encode(postIndexAttrDiffResponse{ Attrs: attrs, }); err != nil { h.logger().Printf("response encoding error: %s", err) } } -type postDBAttrDiffRequest struct { +type postIndexAttrDiffRequest struct { Blocks []AttrBlock `json:"blocks"` } -type postDBAttrDiffResponse struct { +type postIndexAttrDiffResponse struct { Attrs map[uint64]map[string]interface{} `json:"attrs"` } // handlePostFrame handles POST /frame request. func (h *Handler) handlePostFrame(w http.ResponseWriter, r *http.Request) { - dbName := mux.Vars(r)["db"] + indexName := mux.Vars(r)["index"] frameName := mux.Vars(r)["frame"] // Decode request. @@ -500,15 +498,15 @@ func (h *Handler) handlePostFrame(w http.ResponseWriter, r *http.Request) { return } - // Find database. - db := h.Index.DB(dbName) - if db == nil { - http.Error(w, ErrDatabaseNotFound.Error(), http.StatusNotFound) + // Find index. + index := h.Holder.Index(indexName) + if index == nil { + http.Error(w, ErrIndexNotFound.Error(), http.StatusNotFound) return } // Create frame. - _, err = db.CreateFrame(frameName, req.Options) + _, err = index.CreateFrame(frameName, req.Options) if err == ErrFrameExists { http.Error(w, err.Error(), http.StatusConflict) return @@ -520,7 +518,7 @@ func (h *Handler) handlePostFrame(w http.ResponseWriter, r *http.Request) { // Send the create frame message to all nodes. err = h.Broadcaster.SendSync( &internal.CreateFrameMessage{ - DB: dbName, + Index: indexName, Frame: frameName, Meta: req.Options.Encode(), }) @@ -581,20 +579,20 @@ type postFrameResponse struct{} // handleDeleteFrame handles DELETE /frame request. func (h *Handler) handleDeleteFrame(w http.ResponseWriter, r *http.Request) { - dbName := mux.Vars(r)["db"] + indexName := mux.Vars(r)["index"] frameName := mux.Vars(r)["frame"] - // Find database. 
- db := h.Index.DB(dbName)
- if db == nil {
- if err := json.NewEncoder(w).Encode(deleteDBResponse{}); err != nil {
+ // Find index.
+ index := h.Holder.Index(indexName)
+ if index == nil {
+ if err := json.NewEncoder(w).Encode(deleteIndexResponse{}); err != nil {
 h.logger().Printf("response encoding error: %s", err)
 }
 return
 }
- // Delete frame from the database.
- if err := db.DeleteFrame(frameName); err != nil {
+ // Delete frame from the index.
+ if err := index.DeleteFrame(frameName); err != nil {
 http.Error(w, err.Error(), http.StatusInternalServerError)
 return
 }
@@ -602,7 +600,7 @@ func (h *Handler) handleDeleteFrame(w http.ResponseWriter, r *http.Request) {
 // Send the delete frame message to all nodes.
 err := h.Broadcaster.SendSync(
 &internal.DeleteFrameMessage{
- DB: dbName,
+ Index: indexName,
 Frame: frameName,
 })
 if err != nil {
@@ -619,7 +617,7 @@ type deleteFrameResponse struct{}

 // handlePatchFrameTimeQuantum handles PATCH /frame/time_quantum request.
 func (h *Handler) handlePatchFrameTimeQuantum(w http.ResponseWriter, r *http.Request) {
- dbName := mux.Vars(r)["db"]
+ indexName := mux.Vars(r)["index"]
 frameName := mux.Vars(r)["frame"]

 // Decode request.
@@ -636,14 +634,14 @@ func (h *Handler) handlePatchFrameTimeQuantum(w http.ResponseWriter, r *http.Req
 return
 }

- // Retrieve database by name.
- f := h.Index.Frame(dbName, frameName)
+ // Retrieve frame by name.
+ f := h.Holder.Frame(indexName, frameName)
 if f == nil {
 http.Error(w, ErrFrameNotFound.Error(), http.StatusNotFound)
 return
 }

- // Set default time quantum on database.
+ // Set default time quantum on frame.
 if err := f.SetTimeQuantum(tq); err != nil {
 http.Error(w, err.Error(), http.StatusInternalServerError)
 return
@@ -663,11 +661,11 @@ type patchFrameTimeQuantumResponse struct{}

 // handleGetFrameViews handles GET /frame/views request. 
func (h *Handler) handleGetFrameViews(w http.ResponseWriter, r *http.Request) { - dbName := mux.Vars(r)["db"] + indexName := mux.Vars(r)["index"] frameName := mux.Vars(r)["frame"] // Retrieve views. - f := h.Index.Frame(dbName, frameName) + f := h.Holder.Frame(indexName, frameName) if f == nil { http.Error(w, ErrFrameNotFound.Error(), http.StatusNotFound) return @@ -692,7 +690,7 @@ type getFrameViewsResponse struct { // handlePostFrameAttrDiff handles POST /frame/attr/diff requests. func (h *Handler) handlePostFrameAttrDiff(w http.ResponseWriter, r *http.Request) { - dbName := mux.Vars(r)["db"] + indexName := mux.Vars(r)["index"] frameName := mux.Vars(r)["frame"] // Decode request. @@ -702,15 +700,15 @@ func (h *Handler) handlePostFrameAttrDiff(w http.ResponseWriter, r *http.Request return } - // Retrieve database from index. - f := h.Index.Frame(dbName, frameName) + // Retrieve index from holder. + f := h.Holder.Frame(indexName, frameName) if f == nil { http.Error(w, ErrFrameNotFound.Error(), http.StatusNotFound) return } // Retrieve local blocks. - blks, err := f.BitmapAttrStore().Blocks() + blks, err := f.RowAttrStore().Blocks() if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -720,13 +718,13 @@ func (h *Handler) handlePostFrameAttrDiff(w http.ResponseWriter, r *http.Request attrs := make(map[uint64]map[string]interface{}) for _, blockID := range AttrBlocks(blks).Diff(req.Blocks) { // Retrieve block data. - m, err := f.BitmapAttrStore().BlockData(blockID) + m, err := f.RowAttrStore().BlockData(blockID) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } - // Copy to database-wide struct. + // Copy to index-wide struct. for k, v := range m { attrs[k] = v } @@ -748,24 +746,24 @@ type postFrameAttrDiffResponse struct { Attrs map[uint64]map[string]interface{} `json:"attrs"` } -// readProfiles returns a list of profile objects by id. 
-func (h *Handler) readProfiles(db *DB, ids []uint64) ([]*Profile, error) { - if db == nil { +// readColumnAttrSets returns a list of column attribute objects by id. +func (h *Handler) readColumnAttrSets(index *Index, ids []uint64) ([]*ColumnAttrSet, error) { + if index == nil { return nil, nil } - a := make([]*Profile, 0, len(ids)) + a := make([]*ColumnAttrSet, 0, len(ids)) for _, id := range ids { - // Read attributes for profile. Skip profile if empty. - attrs, err := db.ProfileAttrStore().Attrs(id) + // Read attributes for column. Skip column if empty. + attrs, err := index.ColumnAttrStore().Attrs(id) if err != nil { return nil, err } else if len(attrs) == 0 { continue } - // Append profile with attributes. - a = append(a, &Profile{ID: id, Attrs: attrs}) + // Append column with attributes. + a = append(a, &ColumnAttrSet{ID: id, Attrs: attrs}) } return a, nil @@ -826,10 +824,10 @@ func (h *Handler) readURLQueryRequest(r *http.Request) (*QueryRequest, error) { } return &QueryRequest{ - Query: query, - Slices: slices, - Profiles: q.Get("profiles") == "true", - Quantum: quantum, + Query: query, + Slices: slices, + ColumnAttrs: q.Get("columnAttrs") == "true", + Quantum: quantum, }, nil } @@ -892,33 +890,33 @@ func (h *Handler) handlePostImport(w http.ResponseWriter, r *http.Request) { } // Validate that this handler owns the slice. - if !h.Cluster.OwnsFragment(h.Host, req.DB, req.Slice) { - mesg := fmt.Sprintf("host does not own slice %s-%s slice:%d", h.Host, req.DB, req.Slice) + if !h.Cluster.OwnsFragment(h.Host, req.Index, req.Slice) { + mesg := fmt.Sprintf("host does not own slice %s-%s slice:%d", h.Host, req.Index, req.Slice) http.Error(w, mesg, http.StatusPreconditionFailed) return } - // Find the DB. 
- h.logger().Println("importing:", req.DB, req.Frame, req.Slice) - db := h.Index.DB(req.DB) - if db == nil { - h.logger().Printf("fragment error: db=%s, frame=%s, slice=%d, err=%s", req.DB, req.Frame, req.Slice, ErrDatabaseNotFound.Error()) - http.Error(w, ErrDatabaseNotFound.Error(), http.StatusNotFound) + // Find the Index. + h.logger().Println("importing:", req.Index, req.Frame, req.Slice) + index := h.Holder.Index(req.Index) + if index == nil { + h.logger().Printf("fragment error: index=%s, frame=%s, slice=%d, err=%s", req.Index, req.Frame, req.Slice, ErrIndexNotFound.Error()) + http.Error(w, ErrIndexNotFound.Error(), http.StatusNotFound) return } // Retrieve frame. - f := db.Frame(req.Frame) + f := index.Frame(req.Frame) if f == nil { - h.logger().Printf("frame error: db=%s, frame=%s, slice=%d, err=%s", req.DB, req.Frame, req.Slice, ErrFrameNotFound.Error()) + h.logger().Printf("frame error: index=%s, frame=%s, slice=%d, err=%s", req.Index, req.Frame, req.Slice, ErrFrameNotFound.Error()) http.Error(w, ErrFrameNotFound.Error(), http.StatusNotFound) return } // Import into fragment. - err = f.Import(req.BitmapIDs, req.ProfileIDs, timestamps) + err = f.Import(req.RowIDs, req.ColumnIDs, timestamps) if err != nil { - h.logger().Printf("import error: db=%s, frame=%s, slice=%d, bits=%d, err=%s", req.DB, req.Frame, req.Slice, len(req.ProfileIDs), err) + h.logger().Printf("import error: index=%s, frame=%s, slice=%d, bits=%d, err=%s", req.Index, req.Frame, req.Slice, len(req.ColumnIDs), err) return } @@ -949,7 +947,7 @@ func (h *Handler) handleGetExport(w http.ResponseWriter, r *http.Request) { func (h *Handler) handleGetExportCSV(w http.ResponseWriter, r *http.Request) { // Parse query parameters. 
q := r.URL.Query() - db, frame, view := q.Get("db"), q.Get("frame"), q.Get("view") + index, frame, view := q.Get("index"), q.Get("frame"), q.Get("view") slice, err := strconv.ParseUint(q.Get("slice"), 10, 64) if err != nil { @@ -958,14 +956,14 @@ func (h *Handler) handleGetExportCSV(w http.ResponseWriter, r *http.Request) { } // Validate that this handler owns the slice. - if !h.Cluster.OwnsFragment(h.Host, db, slice) { - mesg := fmt.Sprintf("host does not own slice %s-%s slice:%d", h.Host, db, slice) + if !h.Cluster.OwnsFragment(h.Host, index, slice) { + mesg := fmt.Sprintf("host does not own slice %s-%s slice:%d", h.Host, index, slice) http.Error(w, mesg, http.StatusPreconditionFailed) return } // Find the fragment. - f := h.Index.Fragment(db, frame, view, slice) + f := h.Holder.Fragment(index, frame, view, slice) if f == nil { return } @@ -974,10 +972,10 @@ func (h *Handler) handleGetExportCSV(w http.ResponseWriter, r *http.Request) { cw := csv.NewWriter(w) // Iterate over each bit. - if err := f.ForEachBit(func(bitmapID, profileID uint64) error { + if err := f.ForEachBit(func(rowID, columnID uint64) error { return cw.Write([]string{ - strconv.FormatUint(bitmapID, 10), - strconv.FormatUint(profileID, 10), + strconv.FormatUint(rowID, 10), + strconv.FormatUint(columnID, 10), }) }); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) @@ -991,7 +989,7 @@ func (h *Handler) handleGetExportCSV(w http.ResponseWriter, r *http.Request) { // handleGetFragmentNodes handles /fragment/nodes requests. func (h *Handler) handleGetFragmentNodes(w http.ResponseWriter, r *http.Request) { q := r.URL.Query() - db := q.Get("db") + index := q.Get("index") // Read slice parameter. slice, err := strconv.ParseUint(q.Get("slice"), 10, 64) @@ -1001,7 +999,7 @@ func (h *Handler) handleGetFragmentNodes(w http.ResponseWriter, r *http.Request) } // Retrieve fragment owner nodes. 
- nodes := h.Cluster.FragmentNodes(db, slice) + nodes := h.Cluster.FragmentNodes(index, slice) // Write to response. if err := json.NewEncoder(w).Encode(nodes); err != nil { @@ -1019,8 +1017,8 @@ func (h *Handler) handleGetFragmentData(w http.ResponseWriter, r *http.Request) return } - // Retrieve fragment from index. - f := h.Index.Fragment(q.Get("db"), q.Get("frame"), q.Get("view"), slice) + // Retrieve fragment from holder. + f := h.Holder.Fragment(q.Get("index"), q.Get("frame"), q.Get("view"), slice) if f == nil { http.Error(w, "fragment not found", http.StatusNotFound) return @@ -1043,7 +1041,7 @@ func (h *Handler) handlePostFragmentData(w http.ResponseWriter, r *http.Request) } // Retrieve frame. - f := h.Index.Frame(q.Get("db"), q.Get("frame")) + f := h.Holder.Frame(q.Get("index"), q.Get("frame")) if f == nil { http.Error(w, ErrFrameNotFound.Error(), http.StatusNotFound) return @@ -1082,8 +1080,8 @@ func (h *Handler) handleGetFragmentBlockData(w http.ResponseWriter, r *http.Requ return } - // Retrieve fragment from index. - f := h.Index.Fragment(req.DB, req.Frame, req.View, req.Slice) + // Retrieve fragment from holder. + f := h.Holder.Fragment(req.Index, req.Frame, req.View, req.Slice) if f == nil { http.Error(w, ErrFragmentNotFound.Error(), http.StatusNotFound) return @@ -1092,7 +1090,7 @@ func (h *Handler) handleGetFragmentBlockData(w http.ResponseWriter, r *http.Requ // Read data var resp internal.BlockDataResponse if f != nil { - resp.BitmapIDs, resp.ProfileIDs = f.BlockData(int(req.Block)) + resp.RowIDs, resp.ColumnIDs = f.BlockData(int(req.Block)) } // Encode response. @@ -1118,8 +1116,8 @@ func (h *Handler) handleGetFragmentBlocks(w http.ResponseWriter, r *http.Request return } - // Retrieve fragment from index. - f := h.Index.Fragment(q.Get("db"), q.Get("frame"), q.Get("view"), slice) + // Retrieve fragment from holder. 
+ f := h.Holder.Fragment(q.Get("index"), q.Get("frame"), q.Get("view"), slice) if f == nil { http.Error(w, "fragment not found", http.StatusNotFound) return @@ -1142,7 +1140,7 @@ type getFragmentBlocksResponse struct { // handlePostFrameRestore handles POST /frame/restore requests. func (h *Handler) handlePostFrameRestore(w http.ResponseWriter, r *http.Request) { - dbName := mux.Vars(r)["db"] + indexName := mux.Vars(r)["index"] frameName := mux.Vars(r)["frame"] q := r.URL.Query() @@ -1162,30 +1160,30 @@ func (h *Handler) handlePostFrameRestore(w http.ResponseWriter, r *http.Request) } // Determine the maximum number of slices. - maxSlices, err := client.MaxSliceByDatabase(r.Context()) + maxSlices, err := client.MaxSliceByIndex(r.Context()) if err != nil { http.Error(w, "cannot determine remote slice count: "+err.Error(), http.StatusInternalServerError) return } // Retrieve frame. - f := h.Index.Frame(dbName, frameName) + f := h.Holder.Frame(indexName, frameName) if f == nil { http.Error(w, ErrFrameNotFound.Error(), http.StatusNotFound) return } // Retrieve list of all views. - views, err := client.FrameViews(r.Context(), dbName, frameName) + views, err := client.FrameViews(r.Context(), indexName, frameName) if err != nil { http.Error(w, "cannot retrieve frame views: "+err.Error(), http.StatusInternalServerError) return } // Loop over each slice and import it if this node owns it. - for slice := uint64(0); slice <= maxSlices[dbName]; slice++ { + for slice := uint64(0); slice <= maxSlices[indexName]; slice++ { // Ignore this slice if we don't own it. - if !h.Cluster.OwnsFragment(h.Host, dbName, slice) { + if !h.Cluster.OwnsFragment(h.Host, indexName, slice) { continue } @@ -1206,7 +1204,7 @@ func (h *Handler) handlePostFrameRestore(w http.ResponseWriter, r *http.Request) } // Stream backup from remote node. 
- rd, err := client.BackupSlice(r.Context(), dbName, frameName, view, slice) + rd, err := client.BackupSlice(r.Context(), indexName, frameName, view, slice) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -1270,8 +1268,8 @@ func (h *Handler) logger() *log.Logger { // QueryRequest represent a request to process a query. type QueryRequest struct { - // Database to execute query against. - DB string + // Index to execute query against. + Index string // The query string to parse and execute. Query string @@ -1280,8 +1278,8 @@ type QueryRequest struct { // If empty, all slices are included. Slices []uint64 - // Return profile attributes, if true. - Profiles bool + // Return column attributes, if true. + ColumnAttrs bool // Time granularity to use with the timestamp. Quantum TimeQuantum @@ -1293,11 +1291,11 @@ type QueryRequest struct { func decodeQueryRequest(pb *internal.QueryRequest) *QueryRequest { req := &QueryRequest{ - Query: pb.Query, - Slices: pb.Slices, - Profiles: pb.Profiles, - Quantum: TimeQuantum(pb.Quantum), - Remote: pb.Remote, + Query: pb.Query, + Slices: pb.Slices, + ColumnAttrs: pb.ColumnAttrs, + Quantum: TimeQuantum(pb.Quantum), + Remote: pb.Remote, } return req @@ -1309,8 +1307,8 @@ type QueryResponse struct { // Can be a Bitmap, Pairs, or uint64. Results []interface{} - // Set of profiles matching IDs returned in Result. - Profiles []*Profile + // Set of column attribute objects matching IDs returned in Result. + ColumnAttrSets []*ColumnAttrSet // Error during parsing or execution. 
Err error @@ -1318,12 +1316,12 @@ type QueryResponse struct { func (resp *QueryResponse) MarshalJSON() ([]byte, error) { var output struct { - Results []interface{} `json:"results,omitempty"` - Profiles []*Profile `json:"profiles,omitempty"` - Err string `json:"error,omitempty"` + Results []interface{} `json:"results,omitempty"` + ColumnAttrSets []*ColumnAttrSet `json:"columnAttrs,omitempty"` + Err string `json:"error,omitempty"` } output.Results = resp.Results - output.Profiles = resp.Profiles + output.ColumnAttrSets = resp.ColumnAttrSets if resp.Err != nil { output.Err = resp.Err.Error() @@ -1333,8 +1331,8 @@ func (resp *QueryResponse) MarshalJSON() ([]byte, error) { func encodeQueryResponse(resp *QueryResponse) *internal.QueryResponse { pb := &internal.QueryResponse{ - Results: make([]*internal.QueryResult, len(resp.Results)), - Profiles: encodeProfiles(resp.Profiles), + Results: make([]*internal.QueryResult, len(resp.Results)), + ColumnAttrSets: encodeColumnAttrSets(resp.ColumnAttrSets), } for i := range resp.Results { diff --git a/handler_internal_test.go b/handler_internal_test.go index 92221f1d7..f749c23aa 100644 --- a/handler_internal_test.go +++ b/handler_internal_test.go @@ -6,21 +6,21 @@ import ( "testing" ) -// Test custom UnmarshalJSON for postDBRequest object -func TestPostDBRequestUnmarshalJSON(t *testing.T) { +// Test custom UnmarshalJSON for postIndexRequest object +func TestPostIndexRequestUnmarshalJSON(t *testing.T) { tests := []struct { json string - expected postDBRequest + expected postIndexRequest err string }{ - {json: `{"options": {}}`, expected: postDBRequest{Options: DBOptions{}}}, + {json: `{"options": {}}`, expected: postIndexRequest{Options: IndexOptions{}}}, {json: `{"options": 4}`, err: "options is not map[string]interface{}"}, {json: `{"option": {}}`, err: "Unknown key: option:map[]"}, - {json: `{"options": {"columnLabel": "test"}}`, expected: postDBRequest{Options: DBOptions{ColumnLabel: "test"}}}, + {json: `{"options": 
{"columnLabel": "test"}}`, expected: postIndexRequest{Options: IndexOptions{ColumnLabel: "test"}}}, {json: `{"options": {"columnLabl": "test"}}`, err: "Unknown key: columnLabl:test"}, } for _, test := range tests { - actual := &postDBRequest{} + actual := &postIndexRequest{} err := json.Unmarshal([]byte(test.json), actual) if err != nil { diff --git a/handler_test.go b/handler_test.go index 049fdc036..d0b467f90 100644 --- a/handler_test.go +++ b/handler_test.go @@ -31,69 +31,69 @@ func TestHandler_NotFound(t *testing.T) { // Ensure the handler can return the schema. func TestHandler_Schema(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() + hldr := MustOpenHolder() + defer hldr.Close() - d0 := idx.MustCreateDBIfNotExists("d0", pilosa.DBOptions{}) - d1 := idx.MustCreateDBIfNotExists("d1", pilosa.DBOptions{}) + i0 := hldr.MustCreateIndexIfNotExists("i0", pilosa.IndexOptions{}) + i1 := hldr.MustCreateIndexIfNotExists("i1", pilosa.IndexOptions{}) - if f, err := d0.CreateFrameIfNotExists("f1", pilosa.FrameOptions{InverseEnabled: true}); err != nil { + if f, err := i0.CreateFrameIfNotExists("f1", pilosa.FrameOptions{InverseEnabled: true}); err != nil { t.Fatal(err) } else if _, err := f.SetBit(pilosa.ViewStandard, 0, 0, nil); err != nil { t.Fatal(err) } else if _, err := f.SetBit(pilosa.ViewInverse, 0, 0, nil); err != nil { t.Fatal(err) } - if f, err := d1.CreateFrameIfNotExists("f0", pilosa.FrameOptions{}); err != nil { + if f, err := i1.CreateFrameIfNotExists("f0", pilosa.FrameOptions{}); err != nil { t.Fatal(err) } else if _, err := f.SetBit(pilosa.ViewStandard, 0, 0, nil); err != nil { t.Fatal(err) } - if _, err := d0.CreateFrameIfNotExists("f0", pilosa.FrameOptions{}); err != nil { + if _, err := i0.CreateFrameIfNotExists("f0", pilosa.FrameOptions{}); err != nil { t.Fatal(err) } h := NewHandler() - h.Index = idx.Index + h.Holder = hldr.Holder w := httptest.NewRecorder() h.ServeHTTP(w, MustNewHTTPRequest("GET", "/schema", nil)) if w.Code != http.StatusOK { 
t.Fatalf("unexpected status code: %d", w.Code) - } else if body := w.Body.String(); body != `{"dbs":[{"name":"d0","frames":[{"name":"f0"},{"name":"f1","views":[{"name":"inverse"},{"name":"standard"}]}]},{"name":"d1","frames":[{"name":"f0","views":[{"name":"standard"}]}]}]}`+"\n" { + } else if body := w.Body.String(); body != `{"indexes":[{"name":"i0","frames":[{"name":"f0"},{"name":"f1","views":[{"name":"inverse"},{"name":"standard"}]}]},{"name":"i1","frames":[{"name":"f0","views":[{"name":"standard"}]}]}]}`+"\n" { t.Fatalf("unexpected body: %s", body) } } // Ensure the handler can return the maxslice map. func TestHandler_MaxSlices(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() + hldr := MustOpenHolder() + defer hldr.Close() - idx.MustCreateFragmentIfNotExists("d0", "f0", pilosa.ViewStandard, 1).MustSetBits(30, (1*SliceWidth)+1) - idx.MustCreateFragmentIfNotExists("d0", "f0", pilosa.ViewStandard, 1).MustSetBits(30, (1*SliceWidth)+2) - idx.MustCreateFragmentIfNotExists("d0", "f0", pilosa.ViewStandard, 3).MustSetBits(30, (3*SliceWidth)+4) + hldr.MustCreateFragmentIfNotExists("i0", "f0", pilosa.ViewStandard, 1).MustSetBits(30, (1*SliceWidth)+1) + hldr.MustCreateFragmentIfNotExists("i0", "f0", pilosa.ViewStandard, 1).MustSetBits(30, (1*SliceWidth)+2) + hldr.MustCreateFragmentIfNotExists("i0", "f0", pilosa.ViewStandard, 3).MustSetBits(30, (3*SliceWidth)+4) - idx.MustCreateFragmentIfNotExists("d1", "f1", pilosa.ViewStandard, 0).MustSetBits(40, (0*SliceWidth)+1) - idx.MustCreateFragmentIfNotExists("d1", "f1", pilosa.ViewStandard, 0).MustSetBits(40, (0*SliceWidth)+2) - idx.MustCreateFragmentIfNotExists("d1", "f1", pilosa.ViewStandard, 0).MustSetBits(40, (0*SliceWidth)+8) + hldr.MustCreateFragmentIfNotExists("i1", "f1", pilosa.ViewStandard, 0).MustSetBits(40, (0*SliceWidth)+1) + hldr.MustCreateFragmentIfNotExists("i1", "f1", pilosa.ViewStandard, 0).MustSetBits(40, (0*SliceWidth)+2) + hldr.MustCreateFragmentIfNotExists("i1", "f1", pilosa.ViewStandard, 
0).MustSetBits(40, (0*SliceWidth)+8) h := NewHandler() - h.Index = idx.Index + h.Holder = hldr.Holder w := httptest.NewRecorder() h.ServeHTTP(w, MustNewHTTPRequest("GET", "/slices/max", nil)) if w.Code != http.StatusOK { t.Fatalf("unexpected status code: %d", w.Code) - } else if body := w.Body.String(); body != `{"maxSlices":{"d0":3,"d1":0}}`+"\n" { + } else if body := w.Body.String(); body != `{"maxSlices":{"i0":3,"i1":0}}`+"\n" { t.Fatalf("unexpected body: %s", body) } } // Ensure the handler can return the maxslice map for the inverse views. func TestHandler_MaxSlices_Inverse(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() + hldr := MustOpenHolder() + defer hldr.Close() - f0, err := idx.MustCreateDBIfNotExists("d0", pilosa.DBOptions{}).CreateFrame("f0", pilosa.FrameOptions{InverseEnabled: true}) + f0, err := hldr.MustCreateIndexIfNotExists("i0", pilosa.IndexOptions{}).CreateFrame("f0", pilosa.FrameOptions{InverseEnabled: true}) if err != nil { t.Fatal(err) } @@ -105,7 +105,7 @@ func TestHandler_MaxSlices_Inverse(t *testing.T) { t.Fatal(err) } - f1, err := idx.MustCreateDBIfNotExists("d1", pilosa.DBOptions{}).CreateFrame("f1", pilosa.FrameOptions{InverseEnabled: true}) + f1, err := hldr.MustCreateIndexIfNotExists("i1", pilosa.IndexOptions{}).CreateFrame("f1", pilosa.FrameOptions{InverseEnabled: true}) if err != nil { t.Fatal(err) } @@ -118,12 +118,12 @@ func TestHandler_MaxSlices_Inverse(t *testing.T) { } h := NewHandler() - h.Index = idx.Index + h.Holder = hldr.Holder w := httptest.NewRecorder() h.ServeHTTP(w, MustNewHTTPRequest("GET", "/slices/max?inverse=true", nil)) if w.Code != http.StatusOK { t.Fatalf("unexpected status code: %d", w.Code) - } else if body := w.Body.String(); body != `{"maxSlices":{"d0":3,"d1":0}}`+"\n" { + } else if body := w.Body.String(); body != `{"maxSlices":{"i0":3,"i1":0}}`+"\n" { t.Fatalf("unexpected body: %s", body) } } @@ -131,9 +131,9 @@ func TestHandler_MaxSlices_Inverse(t *testing.T) { // Ensure the handler can 
accept URL arguments. func TestHandler_Query_Args_URL(t *testing.T) { h := NewHandler() - h.Executor.ExecuteFn = func(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { - if db != "db0" { - t.Fatalf("unexpected db: %s", db) + h.Executor.ExecuteFn = func(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { + if index != "idx0" { + t.Fatalf("unexpected index: %s", index) } else if query.String() != `Count(Bitmap(id=100))` { t.Fatalf("unexpected query: %s", query.String()) } else if !reflect.DeepEqual(slices, []uint64{0, 1}) { @@ -143,7 +143,7 @@ func TestHandler_Query_Args_URL(t *testing.T) { } w := httptest.NewRecorder() - h.ServeHTTP(w, MustNewHTTPRequest("POST", "/db/db0/query?slices=0,1", strings.NewReader("Count( Bitmap( id=100))"))) + h.ServeHTTP(w, MustNewHTTPRequest("POST", "/index/idx0/query?slices=0,1", strings.NewReader("Count( Bitmap( id=100))"))) if w.Code != http.StatusOK { t.Fatalf("unexpected status code: %d", w.Code, w.Body.String()) } else if body := w.Body.String(); body != `{"results":[100]}`+"\n" { @@ -154,9 +154,9 @@ func TestHandler_Query_Args_URL(t *testing.T) { // Ensure the handler can accept arguments via protobufs. 
func TestHandler_Query_Args_Protobuf(t *testing.T) { h := NewHandler() - h.Executor.ExecuteFn = func(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { - if db != "db0" { - t.Fatalf("unexpected db: %s", db) + h.Executor.ExecuteFn = func(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { + if index != "idx0" { + t.Fatalf("unexpected index: %s", index) } else if query.String() != `Count(Bitmap(id=100))` { t.Fatalf("unexpected query: %s", query.String()) } else if !reflect.DeepEqual(slices, []uint64{0, 1}) { @@ -175,7 +175,7 @@ func TestHandler_Query_Args_Protobuf(t *testing.T) { } // Generate protobuf request. - req := MustNewHTTPRequest("POST", "/db/db0/query", bytes.NewReader(reqBody)) + req := MustNewHTTPRequest("POST", "/index/idx0/query", bytes.NewReader(reqBody)) req.Header.Set("Content-Type", "application/x-protobuf") w := httptest.NewRecorder() @@ -188,7 +188,7 @@ func TestHandler_Query_Args_Protobuf(t *testing.T) { // Ensure the handler returns an error when parsing bad arguments. func TestHandler_Query_Args_Err(t *testing.T) { w := httptest.NewRecorder() - NewHandler().ServeHTTP(w, MustNewHTTPRequest("POST", "/db/db0/query?slices=a,b", strings.NewReader("Bitmap(id=100)"))) + NewHandler().ServeHTTP(w, MustNewHTTPRequest("POST", "/index/idx0/query?slices=a,b", strings.NewReader("Bitmap(id=100)"))) if w.Code != http.StatusBadRequest { t.Fatalf("unexpected status code: %d", w.Code) } else if body := w.Body.String(); body != `{"error":"invalid slice argument"}`+"\n" { @@ -199,12 +199,12 @@ func TestHandler_Query_Args_Err(t *testing.T) { // Ensure the handler can execute a query with a uint64 response as JSON. 
func TestHandler_Query_Uint64_JSON(t *testing.T) { h := NewHandler() - h.Executor.ExecuteFn = func(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { + h.Executor.ExecuteFn = func(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { return []interface{}{uint64(100)}, nil } w := httptest.NewRecorder() - h.ServeHTTP(w, MustNewHTTPRequest("POST", "/db/db0/query?slices=0,1", strings.NewReader("Count( Bitmap( id=100))"))) + h.ServeHTTP(w, MustNewHTTPRequest("POST", "/index/idx0/query?slices=0,1", strings.NewReader("Count( Bitmap( id=100))"))) if w.Code != http.StatusOK { t.Fatalf("unexpected status code: %d", w.Code) } else if body := w.Body.String(); body != `{"results":[100]}`+"\n" { @@ -215,12 +215,12 @@ func TestHandler_Query_Uint64_JSON(t *testing.T) { // Ensure the handler can execute a query with a uint64 response as protobufs. func TestHandler_Query_Uint64_Protobuf(t *testing.T) { h := NewHandler() - h.Executor.ExecuteFn = func(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { + h.Executor.ExecuteFn = func(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { return []interface{}{uint64(100)}, nil } w := httptest.NewRecorder() - r := MustNewHTTPRequest("POST", "/db/d/query", strings.NewReader("Count(Bitmap(id=100))")) + r := MustNewHTTPRequest("POST", "/index/i/query", strings.NewReader("Count(Bitmap(id=100))")) r.Header.Set("Accept", "application/x-protobuf") h.ServeHTTP(w, r) if w.Code != http.StatusOK { @@ -238,14 +238,14 @@ func TestHandler_Query_Uint64_Protobuf(t *testing.T) { // Ensure the handler can execute a query that returns a bitmap as JSON. 
func TestHandler_Query_Bitmap_JSON(t *testing.T) { h := NewHandler() - h.Executor.ExecuteFn = func(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { + h.Executor.ExecuteFn = func(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { bm := pilosa.NewBitmap(1, 3, 66, pilosa.SliceWidth+1) bm.Attrs = map[string]interface{}{"a": "b", "c": 1, "d": true} return []interface{}{bm}, nil } w := httptest.NewRecorder() - h.ServeHTTP(w, MustNewHTTPRequest("POST", "/db/d/query", strings.NewReader("Bitmap(id=100)"))) + h.ServeHTTP(w, MustNewHTTPRequest("POST", "/index/i/query", strings.NewReader("Bitmap(id=100)"))) if w.Code != http.StatusOK { t.Fatalf("unexpected status code: %d", w.Code) } else if body := w.Body.String(); body != `{"results":[{"attrs":{"a":"b","c":1,"d":true},"bits":[1,3,66,1048577]}]}`+"\n" { @@ -253,34 +253,34 @@ func TestHandler_Query_Bitmap_JSON(t *testing.T) { } } -// Ensure the handler can execute a query that returns a bitmap with profiles as JSON. -func TestHandler_Query_Bitmap_Profiles_JSON(t *testing.T) { - idx := NewIndex() - defer idx.Close() +// Ensure the handler can execute a query that returns a bitmap with column attributes as JSON. +func TestHandler_Query_Bitmap_ColumnAttrs_JSON(t *testing.T) { + hldr := NewHolder() + defer hldr.Close() - // Create database and set profile attributes. - db, err := idx.CreateDBIfNotExists("d", pilosa.DBOptions{}) + // Create index and set column attributes. 
+ index, err := hldr.CreateIndexIfNotExists("i", pilosa.IndexOptions{}) if err != nil { t.Fatal(err) - } else if err := db.ProfileAttrStore().SetAttrs(3, map[string]interface{}{"x": "y"}); err != nil { + } else if err := index.ColumnAttrStore().SetAttrs(3, map[string]interface{}{"x": "y"}); err != nil { t.Fatal(err) - } else if err := db.ProfileAttrStore().SetAttrs(66, map[string]interface{}{"y": 123, "z": false}); err != nil { + } else if err := index.ColumnAttrStore().SetAttrs(66, map[string]interface{}{"y": 123, "z": false}); err != nil { t.Fatal(err) } h := NewHandler() - h.Index = idx.Index - h.Executor.ExecuteFn = func(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { + h.Holder = hldr.Holder + h.Executor.ExecuteFn = func(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { bm := pilosa.NewBitmap(1, 3, 66, pilosa.SliceWidth+1) bm.Attrs = map[string]interface{}{"a": "b", "c": 1, "d": true} return []interface{}{bm}, nil } w := httptest.NewRecorder() - h.ServeHTTP(w, MustNewHTTPRequest("POST", "/db/d/query?profiles=true", strings.NewReader("Bitmap(id=100)"))) + h.ServeHTTP(w, MustNewHTTPRequest("POST", "/index/i/query?columnAttrs=true", strings.NewReader("Bitmap(id=100)"))) if w.Code != http.StatusOK { t.Fatalf("unexpected status code: %d", w.Code) - } else if body := w.Body.String(); body != `{"results":[{"attrs":{"a":"b","c":1,"d":true},"bits":[1,3,66,1048577]}],"profiles":[{"id":3,"attrs":{"x":"y"}},{"id":66,"attrs":{"y":123,"z":false}}]}`+"\n" { + } else if body := w.Body.String(); body != `{"results":[{"attrs":{"a":"b","c":1,"d":true},"bits":[1,3,66,1048577]}],"columnAttrs":[{"id":3,"attrs":{"x":"y"}},{"id":66,"attrs":{"y":123,"z":false}}]}`+"\n" { t.Fatalf("unexpected body: %s", body) } } @@ -288,14 +288,14 @@ func TestHandler_Query_Bitmap_Profiles_JSON(t *testing.T) { // Ensure the handler can execute a query that returns a 
bitmap as protobuf. func TestHandler_Query_Bitmap_Protobuf(t *testing.T) { h := NewHandler() - h.Executor.ExecuteFn = func(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { + h.Executor.ExecuteFn = func(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { bm := pilosa.NewBitmap(1, pilosa.SliceWidth+1) bm.Attrs = map[string]interface{}{"a": "b", "c": int64(1), "d": true} return []interface{}{bm}, nil } w := httptest.NewRecorder() - r := MustNewHTTPRequest("POST", "/db/d/query", strings.NewReader("Bitmap(id=100)")) + r := MustNewHTTPRequest("POST", "/index/i/query", strings.NewReader("Bitmap(id=100)")) r.Header.Set("Accept", "application/x-protobuf") h.ServeHTTP(w, r) if w.Code != http.StatusOK { @@ -318,22 +318,22 @@ func TestHandler_Query_Bitmap_Protobuf(t *testing.T) { } } -// Ensure the handler can execute a query that returns a bitmap with profiles as protobuf. -func TestHandler_Query_Bitmap_Profiles_Protobuf(t *testing.T) { - idx := NewIndex() - defer idx.Close() +// Ensure the handler can execute a query that returns a bitmap with column attributes as protobuf. +func TestHandler_Query_Bitmap_ColumnAttrs_Protobuf(t *testing.T) { + hldr := NewHolder() + defer hldr.Close() - // Create database and set profile attributes. - db, err := idx.CreateDBIfNotExists("d", pilosa.DBOptions{}) + // Create index and set column attributes. 
+ index, err := hldr.CreateIndexIfNotExists("i", pilosa.IndexOptions{}) if err != nil { t.Fatal(err) - } else if err := db.ProfileAttrStore().SetAttrs(1, map[string]interface{}{"x": "y"}); err != nil { + } else if err := index.ColumnAttrStore().SetAttrs(1, map[string]interface{}{"x": "y"}); err != nil { t.Fatal(err) } h := NewHandler() - h.Index = idx.Index - h.Executor.ExecuteFn = func(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { + h.Holder = hldr.Holder + h.Executor.ExecuteFn = func(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { bm := pilosa.NewBitmap(1, pilosa.SliceWidth+1) bm.Attrs = map[string]interface{}{"a": "b", "c": int64(1), "d": true} return []interface{}{bm}, nil @@ -341,15 +341,15 @@ func TestHandler_Query_Bitmap_Profiles_Protobuf(t *testing.T) { // Encode request body. buf, err := proto.Marshal(&internal.QueryRequest{ - Query: "Bitmap(id=100)", - Profiles: true, + Query: "Bitmap(id=100)", + ColumnAttrs: true, }) if err != nil { t.Fatal(err) } w := httptest.NewRecorder() - r := MustNewHTTPRequest("POST", "/db/d/query", bytes.NewReader(buf)) + r := MustNewHTTPRequest("POST", "/index/i/query", bytes.NewReader(buf)) r.Header.Set("Content-Type", "application/x-protobuf") r.Header.Set("Accept", "application/x-protobuf") h.ServeHTTP(w, r) @@ -373,12 +373,12 @@ func TestHandler_Query_Bitmap_Profiles_Protobuf(t *testing.T) { t.Fatalf("unexpected attr[2]: %s=%v", k, v) } - if a := resp.Profiles; len(a) != 1 { - t.Fatalf("unexpected profiles length: %d", len(a)) + if a := resp.ColumnAttrSets; len(a) != 1 { + t.Fatalf("unexpected column attributes length: %d", len(a)) } else if a[0].ID != 1 { t.Fatalf("unexpected id: %d", a[0].ID) } else if len(a[0].Attrs) != 1 { - t.Fatalf("unexpected profile attr length: %d", len(a)) + t.Fatalf("unexpected column attr length: %d", len(a)) } else if k, v := a[0].Attrs[0].Key, 
a[0].Attrs[0].StringValue; k != "x" || v != "y" { t.Fatalf("unexpected attr[0]: %s=%v", k, v) } @@ -387,7 +387,7 @@ func TestHandler_Query_Bitmap_Profiles_Protobuf(t *testing.T) { // Ensure the handler can execute a query that returns pairs as JSON. func TestHandler_Query_Pairs_JSON(t *testing.T) { h := NewHandler() - h.Executor.ExecuteFn = func(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { + h.Executor.ExecuteFn = func(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { return []interface{}{[]pilosa.Pair{ {ID: 1, Count: 2}, {ID: 3, Count: 4}, @@ -395,7 +395,7 @@ func TestHandler_Query_Pairs_JSON(t *testing.T) { } w := httptest.NewRecorder() - h.ServeHTTP(w, MustNewHTTPRequest("POST", "/db/d/query", strings.NewReader(`TopN(frame=x, n=2)`))) + h.ServeHTTP(w, MustNewHTTPRequest("POST", "/index/i/query", strings.NewReader(`TopN(frame=x, n=2)`))) if w.Code != http.StatusOK { t.Fatalf("unexpected status code: %d", w.Code) } else if body := w.Body.String(); body != `{"results":[[{"id":1,"count":2},{"id":3,"count":4}]]}`+"\n" { @@ -406,7 +406,7 @@ func TestHandler_Query_Pairs_JSON(t *testing.T) { // Ensure the handler can execute a query that returns pairs as protobuf. 
func TestHandler_Query_Pairs_Protobuf(t *testing.T) { h := NewHandler() - h.Executor.ExecuteFn = func(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { + h.Executor.ExecuteFn = func(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { return []interface{}{[]pilosa.Pair{ {ID: 1, Count: 2}, {ID: 3, Count: 4}, @@ -414,7 +414,7 @@ func TestHandler_Query_Pairs_Protobuf(t *testing.T) { } w := httptest.NewRecorder() - r := MustNewHTTPRequest("POST", "/db/d/query", strings.NewReader(`TopN(frame=x, n=2)`)) + r := MustNewHTTPRequest("POST", "/index/i/query", strings.NewReader(`TopN(frame=x, n=2)`)) r.Header.Set("Accept", "application/x-protobuf") h.ServeHTTP(w, r) if w.Code != http.StatusOK { @@ -432,12 +432,12 @@ func TestHandler_Query_Pairs_Protobuf(t *testing.T) { // Ensure the handler can return an error as JSON. func TestHandler_Query_Err_JSON(t *testing.T) { h := NewHandler() - h.Executor.ExecuteFn = func(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { + h.Executor.ExecuteFn = func(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { return nil, errors.New("marker") } w := httptest.NewRecorder() - h.ServeHTTP(w, MustNewHTTPRequest("POST", "/db/d/query", strings.NewReader(`Bitmap(id=100)`))) + h.ServeHTTP(w, MustNewHTTPRequest("POST", "/index/i/query", strings.NewReader(`Bitmap(id=100)`))) if w.Code != http.StatusInternalServerError { t.Fatalf("unexpected status code: %d", w.Code) } else if body := w.Body.String(); body != `{"error":"marker"}`+"\n" { @@ -448,12 +448,12 @@ func TestHandler_Query_Err_JSON(t *testing.T) { // Ensure the handler can return an error as protobuf. 
func TestHandler_Query_Err_Protobuf(t *testing.T) { h := NewHandler() - h.Executor.ExecuteFn = func(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { + h.Executor.ExecuteFn = func(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { return nil, errors.New("marker") } w := httptest.NewRecorder() - r := MustNewHTTPRequest("POST", "/db/d/query", strings.NewReader(`TopN(frame=x, n=2)`)) + r := MustNewHTTPRequest("POST", "/index/i/query", strings.NewReader(`TopN(frame=x, n=2)`)) r.Header.Set("Accept", "application/x-protobuf") h.ServeHTTP(w, r) if w.Code != http.StatusInternalServerError { @@ -471,7 +471,7 @@ func TestHandler_Query_Err_Protobuf(t *testing.T) { // Ensure the handler returns "method not allowed" for non-POST queries. func TestHandler_Query_MethodNotAllowed(t *testing.T) { w := httptest.NewRecorder() - NewHandler().ServeHTTP(w, MustNewHTTPRequest("GET", "/db/d/query", nil)) + NewHandler().ServeHTTP(w, MustNewHTTPRequest("GET", "/index/i/query", nil)) if w.Code != http.StatusMethodNotAllowed { t.Fatalf("invalid status: %d", w.Code) } @@ -481,7 +481,7 @@ func TestHandler_Query_MethodNotAllowed(t *testing.T) { func TestHandler_Query_ErrParse(t *testing.T) { h := NewHandler() w := httptest.NewRecorder() - h.ServeHTTP(w, MustNewHTTPRequest("POST", "/db/db0/query?slices=0,1", strings.NewReader("bad_fn("))) + h.ServeHTTP(w, MustNewHTTPRequest("POST", "/index/idx0/query?slices=0,1", strings.NewReader("bad_fn("))) if w.Code != http.StatusBadRequest { t.Fatalf("unexpected status code: %d", w.Code) } else if body := w.Body.String(); body != `{"error":"expected comma, right paren, or identifier, found \"\" occurred at line 1, char 8"}`+"\n" { @@ -489,22 +489,22 @@ func TestHandler_Query_ErrParse(t *testing.T) { } } -// Ensure the handler can delete a database. 
-func TestHandler_DB_Delete(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() +// Ensure the handler can delete an index. +func TestHandler_Index_Delete(t *testing.T) { + hldr := MustOpenHolder() + defer hldr.Close() s := NewServer() - s.Handler.Index = idx.Index + s.Handler.Holder = hldr.Holder defer s.Close() - // Create database. - if _, err := idx.CreateDBIfNotExists("d", pilosa.DBOptions{}); err != nil { + // Create index. + if _, err := hldr.CreateIndexIfNotExists("i", pilosa.IndexOptions{}); err != nil { t.Fatal(err) } - // Send request to delete database. - resp, err := http.DefaultClient.Do(MustNewHTTPRequest("DELETE", s.URL+"/db/d", strings.NewReader(""))) + // Send request to delete index. + resp, err := http.DefaultClient.Do(MustNewHTTPRequest("DELETE", s.URL+"/index/i", strings.NewReader(""))) if err != nil { t.Fatal(err) } @@ -519,100 +519,100 @@ func TestHandler_DB_Delete(t *testing.T) { t.Fatalf("unexpected response body: %s", buf) } - // Verify database is gone. - if idx.DB("d") != nil { - t.Fatal("expected nil database") + // Verify index is gone. + if hldr.Index("i") != nil { + t.Fatal("expected nil index") } } // Ensure handler can delete a frame. 
func TestHandler_DeleteFrame(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() - d0 := idx.MustCreateDBIfNotExists("d0", pilosa.DBOptions{}) - if _, err := d0.CreateFrameIfNotExists("f1", pilosa.FrameOptions{}); err != nil { + hldr := MustOpenHolder() + defer hldr.Close() + i0 := hldr.MustCreateIndexIfNotExists("i0", pilosa.IndexOptions{}) + if _, err := i0.CreateFrameIfNotExists("f1", pilosa.FrameOptions{}); err != nil { t.Fatal(err) } h := NewHandler() - h.Index = idx.Index + h.Holder = hldr.Holder w := httptest.NewRecorder() - h.ServeHTTP(w, MustNewHTTPRequest("DELETE", "/db/d0/frame/f1", strings.NewReader(""))) + h.ServeHTTP(w, MustNewHTTPRequest("DELETE", "/index/i0/frame/f1", strings.NewReader(""))) if w.Code != http.StatusOK { t.Fatalf("unexpected status code: %d", w.Code) } else if body := w.Body.String(); body != `{}`+"\n" { t.Fatalf("unexpected body: %s", body) - } else if f := idx.DB("d0").Frame("f1"); f != nil { + } else if f := hldr.Index("i0").Frame("f1"); f != nil { t.Fatal("expected nil frame") } } -// Ensure handler can set the DB time quantum. -func TestHandler_SetDBTimeQuantum(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() - idx.MustCreateDBIfNotExists("d0", pilosa.DBOptions{}) +// Ensure handler can set the Index time quantum. 
+func TestHandler_SetIndexTimeQuantum(t *testing.T) { + hldr := MustOpenHolder() + defer hldr.Close() + hldr.MustCreateIndexIfNotExists("i0", pilosa.IndexOptions{}) h := NewHandler() - h.Index = idx.Index + h.Holder = hldr.Holder w := httptest.NewRecorder() - h.ServeHTTP(w, MustNewHTTPRequest("PATCH", "/db/d0/time-quantum", strings.NewReader(`{"timeQuantum":"ymdh"}`))) + h.ServeHTTP(w, MustNewHTTPRequest("PATCH", "/index/i0/time-quantum", strings.NewReader(`{"timeQuantum":"ymdh"}`))) if w.Code != http.StatusOK { t.Fatalf("unexpected status code: %d", w.Code) } else if body := w.Body.String(); body != `{}`+"\n" { t.Fatalf("unexpected body: %s", body) - } else if q := idx.DB("d0").TimeQuantum(); q != pilosa.TimeQuantum("YMDH") { + } else if q := hldr.Index("i0").TimeQuantum(); q != pilosa.TimeQuantum("YMDH") { t.Fatalf("unexpected time quantum: %s", q) } } // Ensure handler can set the frame time quantum. func TestHandler_SetFrameTimeQuantum(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() + hldr := MustOpenHolder() + defer hldr.Close() // Create frame. 
- if _, err := idx.MustCreateDBIfNotExists("d0", pilosa.DBOptions{}).CreateFrame("f1", pilosa.FrameOptions{}); err != nil { + if _, err := hldr.MustCreateIndexIfNotExists("i0", pilosa.IndexOptions{}).CreateFrame("f1", pilosa.FrameOptions{}); err != nil { t.Fatal(err) } h := NewHandler() - h.Index = idx.Index + h.Holder = hldr.Holder w := httptest.NewRecorder() - h.ServeHTTP(w, MustNewHTTPRequest("PATCH", "/db/d0/frame/f1/time-quantum", strings.NewReader(`{"timeQuantum":"ymdh"}`))) + h.ServeHTTP(w, MustNewHTTPRequest("PATCH", "/index/i0/frame/f1/time-quantum", strings.NewReader(`{"timeQuantum":"ymdh"}`))) if w.Code != http.StatusOK { t.Fatalf("unexpected status code: %d", w.Code) } else if body := w.Body.String(); body != `{}`+"\n" { t.Fatalf("unexpected body: %s", body) - } else if q := idx.DB("d0").Frame("f1").TimeQuantum(); q != pilosa.TimeQuantum("YMDH") { + } else if q := hldr.Index("i0").Frame("f1").TimeQuantum(); q != pilosa.TimeQuantum("YMDH") { t.Fatalf("unexpected time quantum: %s", q) } } -// Ensure the handler can return data in differing blocks for a database. -func TestHandler_DB_AttrStore_Diff(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() +// Ensure the handler can return data in differing blocks for an index. +func TestHandler_Index_AttrStore_Diff(t *testing.T) { + hldr := MustOpenHolder() + defer hldr.Close() s := NewServer() - s.Handler.Index = idx.Index + s.Handler.Holder = hldr.Holder defer s.Close() - // Set attributes on the database. - db, err := idx.CreateDBIfNotExists("d", pilosa.DBOptions{}) + // Set attributes on the index. 
+ index, err := hldr.CreateIndexIfNotExists("i", pilosa.IndexOptions{}) if err != nil { t.Fatal(err) } - if err := db.ProfileAttrStore().SetAttrs(1, map[string]interface{}{"foo": 1, "bar": 2}); err != nil { + if err := index.ColumnAttrStore().SetAttrs(1, map[string]interface{}{"foo": 1, "bar": 2}); err != nil { t.Fatal(err) - } else if err := db.ProfileAttrStore().SetAttrs(100, map[string]interface{}{"x": "y"}); err != nil { + } else if err := index.ColumnAttrStore().SetAttrs(100, map[string]interface{}{"x": "y"}); err != nil { t.Fatal(err) - } else if err := db.ProfileAttrStore().SetAttrs(200, map[string]interface{}{"snowman": "☃"}); err != nil { + } else if err := index.ColumnAttrStore().SetAttrs(200, map[string]interface{}{"snowman": "☃"}); err != nil { t.Fatal(err) } // Retrieve block checksums. - blks, err := db.ProfileAttrStore().Blocks() + blks, err := index.ColumnAttrStore().Blocks() if err != nil { t.Fatal(err) } @@ -623,7 +623,7 @@ func TestHandler_DB_AttrStore_Diff(t *testing.T) { // Send block checksums to determine diff. resp, err := http.Post( - s.URL+"/db/d/attr/diff", + s.URL+"/index/i/attr/diff", "application/json", strings.NewReader(`{"blocks":`+string(MustMarshalJSON(blks))+`}`), ) @@ -640,29 +640,29 @@ func TestHandler_DB_AttrStore_Diff(t *testing.T) { // Ensure the handler can return data in differing blocks for a frame. func TestHandler_Frame_AttrStore_Diff(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() + hldr := MustOpenHolder() + defer hldr.Close() s := NewServer() - s.Handler.Index = idx.Index + s.Handler.Holder = hldr.Holder defer s.Close() - // Set attributes on the database. - d := idx.MustCreateDBIfNotExists("d", pilosa.DBOptions{}) - f, err := d.CreateFrameIfNotExists("meta", pilosa.FrameOptions{}) + // Set attributes on the index. 
+ idx := hldr.MustCreateIndexIfNotExists("i", pilosa.IndexOptions{}) + f, err := idx.CreateFrameIfNotExists("meta", pilosa.FrameOptions{}) if err != nil { t.Fatal(err) } - if err := f.BitmapAttrStore().SetAttrs(1, map[string]interface{}{"foo": 1, "bar": 2}); err != nil { + if err := f.RowAttrStore().SetAttrs(1, map[string]interface{}{"foo": 1, "bar": 2}); err != nil { t.Fatal(err) - } else if err := f.BitmapAttrStore().SetAttrs(100, map[string]interface{}{"x": "y"}); err != nil { + } else if err := f.RowAttrStore().SetAttrs(100, map[string]interface{}{"x": "y"}); err != nil { t.Fatal(err) - } else if err := f.BitmapAttrStore().SetAttrs(200, map[string]interface{}{"snowman": "☃"}); err != nil { + } else if err := f.RowAttrStore().SetAttrs(200, map[string]interface{}{"snowman": "☃"}); err != nil { t.Fatal(err) } // Retrieve block checksums. - blks, err := f.BitmapAttrStore().Blocks() + blks, err := f.RowAttrStore().Blocks() if err != nil { t.Fatal(err) } @@ -673,7 +673,7 @@ func TestHandler_Frame_AttrStore_Diff(t *testing.T) { // Send block checksums to determine diff. resp, err := http.Post( - s.URL+"/db/d/frame/meta/attr/diff", + s.URL+"/index/i/frame/meta/attr/diff", "application/json", strings.NewReader(`{"blocks":`+string(MustMarshalJSON(blks))+`}`), ) @@ -690,19 +690,19 @@ func TestHandler_Frame_AttrStore_Diff(t *testing.T) { // Ensure the handler can backup a fragment and then restore it. func TestHandler_Fragment_BackupRestore(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() + hldr := MustOpenHolder() + defer hldr.Close() s := NewServer() - s.Handler.Index = idx.Index + s.Handler.Holder = hldr.Holder defer s.Close() // Set bits in the index. - f0 := idx.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0) + f0 := hldr.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0) f0.MustSetBits(100, 1, 2, 3) - // Begin backing up from slice d/f/0. 
- resp, err := http.Get(s.URL + "/fragment/data?db=d&frame=f&view=standard&slice=0") + // Begin backing up from slice i/f/0. + resp, err := http.Get(s.URL + "/fragment/data?index=i&frame=f&view=standard&slice=0") if err != nil { t.Fatal(err) } @@ -714,12 +714,12 @@ func TestHandler_Fragment_BackupRestore(t *testing.T) { } // Create frame. - if _, err := idx.MustCreateDBIfNotExists("x", pilosa.DBOptions{}).CreateFrame("y", pilosa.FrameOptions{}); err != nil { + if _, err := hldr.MustCreateIndexIfNotExists("x", pilosa.IndexOptions{}).CreateFrame("y", pilosa.FrameOptions{}); err != nil { t.Fatal(err) } // Restore backup to slice x/y/0. - if resp, err := http.Post(s.URL+"/fragment/data?db=x&frame=y&view=standard&slice=0", "application/octet-stream", resp.Body); err != nil { + if resp, err := http.Post(s.URL+"/fragment/data?index=x&frame=y&view=standard&slice=0", "application/octet-stream", resp.Body); err != nil { t.Fatal(err) } else if resp.StatusCode != http.StatusOK { resp.Body.Close() @@ -729,10 +729,10 @@ func TestHandler_Fragment_BackupRestore(t *testing.T) { } // Verify data is correctly restored. - f1 := idx.Fragment("x", "y", pilosa.ViewStandard, 0) + f1 := hldr.Fragment("x", "y", pilosa.ViewStandard, 0) if f1 == nil { t.Fatal("fragment x/y/standard/0 not created") - } else if bits := f1.Bitmap(100).Bits(); !reflect.DeepEqual(bits, []uint64{1, 2, 3}) { + } else if bits := f1.Row(100).Bits(); !reflect.DeepEqual(bits, []uint64{1, 2, 3}) { t.Fatalf("unexpected restored bits: %+v", bits) } } @@ -759,7 +759,7 @@ func TestHandler_Fragment_Nodes(t *testing.T) { h.Cluster.ReplicaN = 2 w := httptest.NewRecorder() - r := MustNewHTTPRequest("GET", "/fragment/nodes?db=X&slice=0", nil) + r := MustNewHTTPRequest("GET", "/fragment/nodes?index=X&slice=0", nil) h.ServeHTTP(w, r) if w.Code != http.StatusOK { t.Fatalf("unexpected status code: %d", w.Code) @@ -802,13 +802,13 @@ func NewHandler() *Handler { // HandlerExecutor is a mock implementing pilosa.Handler.Executor. 
type HandlerExecutor struct { cluster *pilosa.Cluster - ExecuteFn func(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) + ExecuteFn func(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) } func (c *HandlerExecutor) Cluster() *pilosa.Cluster { return c.cluster } -func (c *HandlerExecutor) Execute(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { - return c.ExecuteFn(ctx, db, query, slices, opt) +func (c *HandlerExecutor) Execute(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { + return c.ExecuteFn(ctx, index, query, slices, opt) } // Server represents a test wrapper for httptest.Server. diff --git a/holder.go b/holder.go new file mode 100644 index 000000000..426817410 --- /dev/null +++ b/holder.go @@ -0,0 +1,547 @@ +package pilosa + +import ( + "context" + "errors" + "fmt" + "io" + "log" + "os" + "path/filepath" + "sort" + "sync" + "time" +) + +// DefaultCacheFlushInterval is the default value for Fragment.CacheFlushInterval. +const DefaultCacheFlushInterval = 1 * time.Minute + +// Holder represents a container for indexes. +type Holder struct { + mu sync.Mutex + + // Indexes by name. + indexes map[string]*Index + + Broadcaster Broadcaster + // Close management + wg sync.WaitGroup + closing chan struct{} + + // Stats + Stats StatsClient + + // Data directory path. + Path string + + // The interval at which the cached row ids are persisted to disk. + CacheFlushInterval time.Duration + + LogOutput io.Writer +} + +// NewHolder returns a new instance of Holder. 
+func NewHolder() *Holder { + return &Holder{ + indexes: make(map[string]*Index), + closing: make(chan struct{}, 0), + + Stats: NopStatsClient, + + CacheFlushInterval: DefaultCacheFlushInterval, + + LogOutput: os.Stderr, + } +} + +// Open initializes the root data directory for the holder. +func (h *Holder) Open() error { + if err := os.MkdirAll(h.Path, 0777); err != nil { + return err + } + + // Open path to read all index directories. + f, err := os.Open(h.Path) + if err != nil { + return err + } + defer f.Close() + + fis, err := f.Readdir(0) + if err != nil { + return err + } + + for _, fi := range fis { + if !fi.IsDir() { + continue + } + + h.logger().Printf("opening index: %s", filepath.Base(fi.Name())) + + index, err := h.newIndex(h.IndexPath(filepath.Base(fi.Name())), filepath.Base(fi.Name())) + if err == ErrName { + h.logger().Printf("ERROR opening index: %s, err=%s", fi.Name(), err) + continue + } else if err != nil { + return err + } + if err := index.Open(); err != nil { + if err == ErrName { + h.logger().Printf("ERROR opening index: %s, err=%s", index.Name(), err) + continue + } + return fmt.Errorf("open index: name=%s, err=%s", index.Name(), err) + } + h.indexes[index.Name()] = index + + h.Stats.Count("indexN", 1) + } + + // Periodically flush cache. + h.wg.Add(1) + go func() { defer h.wg.Done(); h.monitorCacheFlush() }() + + return nil +} + +// Close closes all open fragments. +func (h *Holder) Close() error { + // Notify goroutines of closing and wait for completion. + close(h.closing) + h.wg.Wait() + + for _, index := range h.indexes { + index.Close() + } + return nil +} + +// MaxSlices returns MaxSlice map for all indexes. +func (h *Holder) MaxSlices() map[string]uint64 { + a := make(map[string]uint64) + for _, index := range h.Indexes() { + a[index.Name()] = index.MaxSlice() + } + return a +} + +// MaxInverseSlices returns MaxInverseSlice map for all indexes. 
+func (h *Holder) MaxInverseSlices() map[string]uint64 { + a := make(map[string]uint64) + for _, index := range h.Indexes() { + a[index.Name()] = index.MaxInverseSlice() + } + return a +} + +// Schema returns schema data for all indexes and frames. +func (h *Holder) Schema() []*IndexInfo { + var a []*IndexInfo + for _, index := range h.Indexes() { + di := &IndexInfo{Name: index.Name()} + for _, frame := range index.Frames() { + fi := &FrameInfo{Name: frame.Name()} + for _, view := range frame.Views() { + fi.Views = append(fi.Views, &ViewInfo{Name: view.Name()}) + } + sort.Sort(viewInfoSlice(fi.Views)) + di.Frames = append(di.Frames, fi) + } + sort.Sort(frameInfoSlice(di.Frames)) + a = append(a, di) + } + sort.Sort(indexInfoSlice(a)) + return a +} + +// IndexPath returns the path where a given index is stored. +func (h *Holder) IndexPath(name string) string { return filepath.Join(h.Path, name) } + +// Index returns the index by name. +func (h *Holder) Index(name string) *Index { + h.mu.Lock() + defer h.mu.Unlock() + return h.index(name) +} + +func (h *Holder) index(name string) *Index { return h.indexes[name] } + +// Indexes returns a list of all indexes in the holder. +func (h *Holder) Indexes() []*Index { + h.mu.Lock() + defer h.mu.Unlock() + + a := make([]*Index, 0, len(h.indexes)) + for _, index := range h.indexes { + a = append(a, index) + } + sort.Sort(indexSlice(a)) + + return a +} + +// CreateIndex creates an index. +// An error is returned if the index already exists. +func (h *Holder) CreateIndex(name string, opt IndexOptions) (*Index, error) { + h.mu.Lock() + defer h.mu.Unlock() + + // Ensure index doesn't already exist. + if h.indexes[name] != nil { + return nil, ErrIndexExists + } + return h.createIndex(name, opt) +} + +// CreateIndexIfNotExists returns an index by name. +// The index is created if it does not already exist. 
+func (h *Holder) CreateIndexIfNotExists(name string, opt IndexOptions) (*Index, error) { + h.mu.Lock() + defer h.mu.Unlock() + + // Find index in cache first. + if index := h.indexes[name]; index != nil { + return index, nil + } + + return h.createIndex(name, opt) +} + +func (h *Holder) createIndex(name string, opt IndexOptions) (*Index, error) { + if name == "" { + return nil, errors.New("index name required") + } + + // Return index if it exists. + if index := h.index(name); index != nil { + return index, nil + } + + // Otherwise create a new index. + index, err := h.newIndex(h.IndexPath(name), name) + if err != nil { + return nil, err + } + + if err := index.Open(); err != nil { + return nil, err + } + + // Update options. + index.SetColumnLabel(opt.ColumnLabel) + index.SetTimeQuantum(opt.TimeQuantum) + + h.indexes[index.Name()] = index + + h.Stats.Count("indexN", 1) + + return index, nil +} + +func (h *Holder) newIndex(path, name string) (*Index, error) { + index, err := NewIndex(path, name) + if err != nil { + return nil, err + } + index.LogOutput = h.LogOutput + index.stats = h.Stats.WithTags(fmt.Sprintf("index:%s", index.Name())) + index.broadcaster = h.Broadcaster + return index, nil +} + +// DeleteIndex removes an index from the holder. +func (h *Holder) DeleteIndex(name string) error { + h.mu.Lock() + defer h.mu.Unlock() + + // Ignore if index doesn't exist. + index := h.index(name) + if index == nil { + return nil + } + + // Close index. + if err := index.Close(); err != nil { + return err + } + + // Delete index directory. + if err := os.RemoveAll(h.IndexPath(name)); err != nil { + return err + } + + // Remove reference. + delete(h.indexes, name) + + h.Stats.Count("indexN", -1) + + return nil +} + +// Frame returns the frame for an index and name. +func (h *Holder) Frame(index, name string) *Frame { + idx := h.Index(index) + if idx == nil { + return nil + } + return idx.Frame(name) +} + +// View returns the view for an index, frame, and name. 
+func (h *Holder) View(index, frame, name string) *View { + f := h.Frame(index, frame) + if f == nil { + return nil + } + return f.View(name) +} + +// Fragment returns the fragment for an index, frame & slice. +func (h *Holder) Fragment(index, frame, view string, slice uint64) *Fragment { + v := h.View(index, frame, view) + if v == nil { + return nil + } + return v.Fragment(slice) +} + +// monitorCacheFlush periodically flushes all fragment caches sequentially. +// This is run in a goroutine. +func (h *Holder) monitorCacheFlush() { + ticker := time.NewTicker(h.CacheFlushInterval) + defer ticker.Stop() + + for { + select { + case <-h.closing: + return + case <-ticker.C: + h.flushCaches() + } + } +} + +func (h *Holder) flushCaches() { + for _, index := range h.Indexes() { + for _, frame := range index.Frames() { + for _, view := range frame.Views() { + for _, fragment := range view.Fragments() { + select { + case <-h.closing: + return + default: + } + + if err := fragment.FlushCache(); err != nil { + h.logger().Printf("error flushing cache: err=%s, path=%s", err, fragment.CachePath()) + } + } + } + } + } +} + +func (h *Holder) logger() *log.Logger { return log.New(h.LogOutput, "", log.LstdFlags) } + +// HolderSyncer is an active anti-entropy tool that compares the local holder +// with a remote holder based on block checksums and resolves differences. +type HolderSyncer struct { + Holder *Holder + + Host string + Cluster *Cluster + + // Signals that the sync should stop. + Closing <-chan struct{} +} + +// Returns true if the syncer has been marked to close. +func (s *HolderSyncer) IsClosing() bool { + select { + case <-s.Closing: + return true + default: + return false + } +} + +// SyncHolder compares the holder on host with the local holder and resolves differences. +func (s *HolderSyncer) SyncHolder() error { + // Iterate over schema in sorted order. + for _, di := range s.Holder.Schema() { + // Verify syncer has not closed. 
+ if s.IsClosing() { + return nil + } + + // Sync index column attributes. + if err := s.syncIndex(di.Name); err != nil { + return fmt.Errorf("index sync error: index=%s, err=%s", di.Name, err) + } + + for _, fi := range di.Frames { + // Verify syncer has not closed. + if s.IsClosing() { + return nil + } + + // Sync frame row attributes. + if err := s.syncFrame(di.Name, fi.Name); err != nil { + return fmt.Errorf("frame sync error: index=%s, frame=%s, err=%s", di.Name, fi.Name, err) + } + + for _, vi := range fi.Views { + // Verify syncer has not closed. + if s.IsClosing() { + return nil + } + + for slice := uint64(0); slice <= s.Holder.Index(di.Name).MaxSlice(); slice++ { + // Ignore slices that this host doesn't own. + if !s.Cluster.OwnsFragment(s.Host, di.Name, slice) { + continue + } + + // Verify syncer has not closed. + if s.IsClosing() { + return nil + } + + // Sync fragment if own it. + if err := s.syncFragment(di.Name, fi.Name, vi.Name, slice); err != nil { + return fmt.Errorf("fragment sync error: index=%s, frame=%s, slice=%d, err=%s", di.Name, fi.Name, slice, err) + } + } + } + } + } + + return nil +} + +// syncIndex synchronizes index attributes with the rest of the cluster. +func (s *HolderSyncer) syncIndex(index string) error { + // Retrieve index reference. + idx := s.Holder.Index(index) + if idx == nil { + return nil + } + + // Read block checksums. + blks, err := idx.ColumnAttrStore().Blocks() + if err != nil { + return err + } + + // Sync with every other host. + for _, node := range Nodes(s.Cluster.Nodes).FilterHost(s.Host) { + client, err := NewClient(node.Host) + if err != nil { + return err + } + + // Retrieve attributes from differing blocks. + // Skip update and recomputation if no attributes have changed. + m, err := client.ColumnAttrDiff(context.Background(), index, blks) + if err != nil { + return err + } else if len(m) == 0 { + continue + } + + // Update local copy. 
+ if err := idx.ColumnAttrStore().SetBulkAttrs(m); err != nil { + return err + } + + // Recompute blocks. + blks, err = idx.ColumnAttrStore().Blocks() + if err != nil { + return err + } + } + + return nil +} + +// syncFrame synchronizes frame attributes with the rest of the cluster. +func (s *HolderSyncer) syncFrame(index, name string) error { + // Retrieve index reference. + f := s.Holder.Frame(index, name) + if f == nil { + return nil + } + + // Read block checksums. + blks, err := f.RowAttrStore().Blocks() + if err != nil { + return err + } + + // Sync with every other host. + for _, node := range Nodes(s.Cluster.Nodes).FilterHost(s.Host) { + client, err := NewClient(node.Host) + if err != nil { + return err + } + + // Retrieve attributes from differing blocks. + // Skip update and recomputation if no attributes have changed. + m, err := client.RowAttrDiff(context.Background(), index, name, blks) + if err == ErrFrameNotFound { + continue // frame not created remotely yet, skip + } else if err != nil { + return err + } else if len(m) == 0 { + continue + } + + // Update local copy. + if err := f.RowAttrStore().SetBulkAttrs(m); err != nil { + return err + } + + // Recompute blocks. + blks, err = f.RowAttrStore().Blocks() + if err != nil { + return err + } + } + + return nil +} + +// syncFragment synchronizes a fragment with the rest of the cluster. +func (s *HolderSyncer) syncFragment(index, frame, view string, slice uint64) error { + // Retrieve local frame. + f := s.Holder.Frame(index, frame) + if f == nil { + return ErrFrameNotFound + } + + // Ensure view exists locally. + v, err := f.CreateViewIfNotExists(view) + if err != nil { + return err + } + + // Ensure fragment exists locally. + frag, err := v.CreateFragmentIfNotExists(slice) + if err != nil { + return err + } + + // Sync fragments together. 
+ fs := FragmentSyncer{ + Fragment: frag, + Host: s.Host, + Cluster: s.Cluster, + Closing: s.Closing, + } + if err := fs.SyncFragment(); err != nil { + return err + } + + return nil +} diff --git a/holder_test.go b/holder_test.go new file mode 100644 index 000000000..78c4700f8 --- /dev/null +++ b/holder_test.go @@ -0,0 +1,234 @@ +package pilosa_test + +import ( + "bytes" + "context" + "io/ioutil" + "os" + "reflect" + "testing" + + "github.com/pilosa/pilosa" + "github.com/pilosa/pilosa/pql" +) + +// Ensure holder can delete an index and its underlying files. +func TestHolder_DeleteIndex(t *testing.T) { + hldr := MustOpenHolder() + defer hldr.Close() + + // Write bits to separate indexes. + f0 := hldr.MustCreateFragmentIfNotExists("i0", "f", pilosa.ViewStandard, 0) + if _, err := f0.SetBit(100, 200); err != nil { + t.Fatal(err) + } + f1 := hldr.MustCreateFragmentIfNotExists("i1", "f", pilosa.ViewStandard, 0) + if _, err := f1.SetBit(100, 200); err != nil { + t.Fatal(err) + } + + // Ensure i0 exists. + if _, err := os.Stat(hldr.IndexPath("i0")); err != nil { + t.Fatal(err) + } + + // Delete i0. + if err := hldr.DeleteIndex("i0"); err != nil { + t.Fatal(err) + } + + // Ensure i0 files are removed & i1 still exists. + if _, err := os.Stat(hldr.IndexPath("i0")); !os.IsNotExist(err) { + t.Fatal("expected i0 file deletion") + } else if _, err := os.Stat(hldr.IndexPath("i1")); err != nil { + t.Fatal("expected i1 files to still exist", err) + } +} + +// Ensure holder can sync with a remote holder. +func TestHolderSyncer_SyncHolder(t *testing.T) { + cluster := NewCluster(2) + + // Create a local holder. 
+ hldr0 := MustOpenHolder() + defer hldr0.Close() + + // Create a remote holder wrapped by an HTTP + hldr1 := MustOpenHolder() + defer hldr1.Close() + s := NewServer() + defer s.Close() + s.Handler.Holder = hldr1.Holder + s.Handler.Executor.ExecuteFn = func(ctx context.Context, index string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { + e := pilosa.NewExecutor() + e.Holder = hldr1.Holder + e.Host = cluster.Nodes[1].Host + e.Cluster = cluster + return e.Execute(ctx, index, query, slices, opt) + } + + // Mock 2-node, fully replicated cluster. + cluster.ReplicaN = 2 + cluster.Nodes[0].Host = "localhost:0" + cluster.Nodes[1].Host = MustParseURLHost(s.URL) + + // Create frames on nodes. + for _, hldr := range []*Holder{hldr0, hldr1} { + hldr.MustCreateFrameIfNotExists("i", "f") + hldr.MustCreateFrameIfNotExists("i", "f0") + hldr.MustCreateFrameIfNotExists("y", "z") + } + + // Set data on the local holder. + f := hldr0.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0) + if _, err := f.SetBit(0, 10); err != nil { + t.Fatal(err) + } else if _, err := f.SetBit(2, 20); err != nil { + t.Fatal(err) + } else if _, err := f.SetBit(120, 10); err != nil { + t.Fatal(err) + } else if _, err := f.SetBit(200, 4); err != nil { + t.Fatal(err) + } + + f = hldr0.MustCreateFragmentIfNotExists("i", "f0", pilosa.ViewStandard, 1) + if _, err := f.SetBit(9, SliceWidth+5); err != nil { + t.Fatal(err) + } + + hldr0.MustCreateFragmentIfNotExists("y", "z", pilosa.ViewStandard, 0) + + // Set data on the remote holder. 
+ f = hldr1.MustCreateFragmentIfNotExists("i", "f", pilosa.ViewStandard, 0) + if _, err := f.SetBit(0, 4000); err != nil { + t.Fatal(err) + } else if _, err := f.SetBit(3, 10); err != nil { + t.Fatal(err) + } else if _, err := f.SetBit(120, 10); err != nil { + t.Fatal(err) + } + + f = hldr1.MustCreateFragmentIfNotExists("y", "z", pilosa.ViewStandard, 3) + if _, err := f.SetBit(10, (3*SliceWidth)+4); err != nil { + t.Fatal(err) + } else if _, err := f.SetBit(10, (3*SliceWidth)+5); err != nil { + t.Fatal(err) + } else if _, err := f.SetBit(10, (3*SliceWidth)+7); err != nil { + t.Fatal(err) + } + + // Set highest slice. + hldr0.Index("i").SetRemoteMaxSlice(1) + hldr0.Index("y").SetRemoteMaxSlice(3) + + // Set up syncer. + syncer := pilosa.HolderSyncer{ + Holder: hldr0.Holder, + Host: cluster.Nodes[0].Host, + Cluster: cluster, + } + + if err := syncer.SyncHolder(); err != nil { + t.Fatal(err) + } + + // Verify data is the same on both nodes. + for i, hldr := range []*Holder{hldr0, hldr1} { + f := hldr.Fragment("i", "f", pilosa.ViewStandard, 0) + if a := f.Row(0).Bits(); !reflect.DeepEqual(a, []uint64{10, 4000}) { + t.Fatalf("unexpected bits(%d/0): %+v", i, a) + } else if a := f.Row(2).Bits(); !reflect.DeepEqual(a, []uint64{20}) { + t.Fatalf("unexpected bits(%d/2): %+v", i, a) + } else if a := f.Row(3).Bits(); !reflect.DeepEqual(a, []uint64{10}) { + t.Fatalf("unexpected bits(%d/3): %+v", i, a) + } else if a := f.Row(120).Bits(); !reflect.DeepEqual(a, []uint64{10}) { + t.Fatalf("unexpected bits(%d/120): %+v", i, a) + } else if a := f.Row(200).Bits(); !reflect.DeepEqual(a, []uint64{4}) { + t.Fatalf("unexpected bits(%d/200): %+v", i, a) + } + + f = hldr.Fragment("i", "f0", pilosa.ViewStandard, 1) + a := f.Row(9).Bits() + if !reflect.DeepEqual(a, []uint64{SliceWidth + 5}) { + t.Fatalf("unexpected bits(%d/i/f0): %+v", i, a) + } + if a := f.Row(9).Bits(); !reflect.DeepEqual(a, []uint64{SliceWidth + 5}) { + t.Fatalf("unexpected bits(%d/d/f0): %+v", i, a) + } + f = 
hldr.Fragment("y", "z", pilosa.ViewStandard, 3) + if a := f.Row(10).Bits(); !reflect.DeepEqual(a, []uint64{(3 * SliceWidth) + 4, (3 * SliceWidth) + 5, (3 * SliceWidth) + 7}) { + t.Fatalf("unexpected bits(%d/y/z): %+v", i, a) + } + } +} + +// Holder is a test wrapper for pilosa.Holder. +type Holder struct { + *pilosa.Holder + LogOutput bytes.Buffer +} + +// NewHolder returns a new instance of Holder with a temporary path. +func NewHolder() *Holder { + path, err := ioutil.TempDir("", "pilosa-") + if err != nil { + panic(err) + } + + h := &Holder{Holder: pilosa.NewHolder()} + h.Path = path + h.Holder.LogOutput = &h.LogOutput + + return h +} + +// MustOpenHolder creates and opens a holder at a temporary path. Panic on error. +func MustOpenHolder() *Holder { + h := NewHolder() + if err := h.Open(); err != nil { + panic(err) + } + return h +} + +// Close closes the holder and removes all underlying data. +func (h *Holder) Close() error { + defer os.RemoveAll(h.Path) + return h.Holder.Close() +} + +// MustCreateIndexIfNotExists returns a given index. Panic on error. +func (h *Holder) MustCreateIndexIfNotExists(index string, opt pilosa.IndexOptions) *Index { + idx, err := h.Holder.CreateIndexIfNotExists(index, opt) + if err != nil { + panic(err) + } + return &Index{Index: idx} +} + +// MustCreateFrameIfNotExists returns a given frame. Panic on error. +func (h *Holder) MustCreateFrameIfNotExists(index, frame string) *Frame { + f, err := h.MustCreateIndexIfNotExists(index, pilosa.IndexOptions{}).CreateFrameIfNotExists(frame, pilosa.FrameOptions{}) + if err != nil { + panic(err) + } + return f +} + +// MustCreateFragmentIfNotExists returns a given fragment. Panic on error. 
+func (h *Holder) MustCreateFragmentIfNotExists(index, frame, view string, slice uint64) *Fragment { + idx := h.MustCreateIndexIfNotExists(index, pilosa.IndexOptions{}) + f, err := idx.CreateFrameIfNotExists(frame, pilosa.FrameOptions{}) + if err != nil { + panic(err) + } + v, err := f.CreateViewIfNotExists(view) + if err != nil { + panic(err) + } + frag, err := v.CreateFragmentIfNotExists(slice) + if err != nil { + panic(err) + } + return &Fragment{Fragment: frag} +} diff --git a/index.go b/index.go index 965c25ff0..3fa8ac028 100644 --- a/index.go +++ b/index.go @@ -1,67 +1,146 @@ package pilosa import ( - "context" "errors" "fmt" "io" - "log" + "io/ioutil" "os" "path/filepath" "sort" "sync" "time" + + "github.com/gogo/protobuf/proto" + "github.com/pilosa/pilosa/internal" ) -// DefaultCacheFlushInterval is the default value for Fragment.CacheFlushInterval. -const DefaultCacheFlushInterval = 1 * time.Minute +// Default index settings. +const ( + DefaultColumnLabel = "columnID" +) -// Index represents a container for fragments. +// Index represents a container for frames. type Index struct { - mu sync.Mutex + mu sync.Mutex + path string + name string - // Databases by name. - dbs map[string]*DB + // Default time quantum for all frames in index. + // This can be overridden by individual frames. + timeQuantum TimeQuantum - Broadcaster Broadcaster - // Close management - wg sync.WaitGroup - closing chan struct{} + // Label used for referring to columns in index. + columnLabel string - // Stats - Stats StatsClient + // Frames by name. + frames map[string]*Frame - // Data directory path. - Path string + // Max Slice on any node in the cluster, according to this node + remoteMaxSlice uint64 + remoteMaxInverseSlice uint64 - // The interval at which the cached bitmap ids are persisted to disk. 
- CacheFlushInterval time.Duration + // Column attribute storage and cache + columnAttrStore *AttrStore + + broadcaster Broadcaster + stats StatsClient LogOutput io.Writer } // NewIndex returns a new instance of Index. -func NewIndex() *Index { +func NewIndex(path, name string) (*Index, error) { + err := ValidateName(name) + if err != nil { + return nil, err + } + return &Index{ - dbs: make(map[string]*DB), - closing: make(chan struct{}, 0), + path: path, + name: name, + frames: make(map[string]*Frame), + + remoteMaxSlice: 0, + remoteMaxInverseSlice: 0, + + columnAttrStore: NewAttrStore(filepath.Join(path, ".data")), + + columnLabel: DefaultColumnLabel, + + stats: NopStatsClient, + LogOutput: ioutil.Discard, + }, nil +} + +// Name returns name of the index. +func (i *Index) Name() string { return i.name } + +// Path returns the path the index was initialized with. +func (i *Index) Path() string { return i.path } - Stats: NopStatsClient, +// ColumnAttrStore returns the storage for column attributes. +func (i *Index) ColumnAttrStore() *AttrStore { return i.columnAttrStore } - CacheFlushInterval: DefaultCacheFlushInterval, +// SetColumnLabel sets the column label. Persists to meta file on update. +func (i *Index) SetColumnLabel(v string) error { + i.mu.Lock() + defer i.mu.Unlock() + + // Ignore if no change occurred. + if v == "" || i.columnLabel == v { + return nil + } + + // Make sure columnLabel is valid name + err := ValidateName(v) + if err != nil { + return err + } - LogOutput: os.Stderr, + // Persist meta data to disk on change. + i.columnLabel = v + if err := i.saveMeta(); err != nil { + return err } + + return nil } -// Open initializes the root data directory for the index. +// ColumnLabel returns the column label. +func (i *Index) ColumnLabel() string { + i.mu.Lock() + v := i.columnLabel + i.mu.Unlock() + return v +} + +// Open opens and initializes the index. 
func (i *Index) Open() error { - if err := os.MkdirAll(i.Path, 0777); err != nil { + // Ensure the path exists. + if err := os.MkdirAll(i.path, 0777); err != nil { + return err + } + + // Read meta file. + if err := i.loadMeta(); err != nil { + return err + } + + if err := i.openFrames(); err != nil { return err } - // Open path to read all database directories. - f, err := os.Open(i.Path) + if err := i.columnAttrStore.Open(); err != nil { + return err + } + + return nil +} + +// openFrames opens and initializes the frames inside the index. +func (i *Index) openFrames() error { + f, err := os.Open(i.path) if err != nil { return err } @@ -77,471 +156,410 @@ func (i *Index) Open() error { continue } - i.logger().Printf("opening database: %s", filepath.Base(fi.Name())) + fr, err := i.newFrame(i.FramePath(filepath.Base(fi.Name())), filepath.Base(fi.Name())) + if err != nil { + return ErrName + } + if err := fr.Open(); err != nil { + return fmt.Errorf("open frame: name=%s, err=%s", fr.Name(), err) + } + i.frames[fr.Name()] = fr - db, err := i.newDB(i.DBPath(filepath.Base(fi.Name())), filepath.Base(fi.Name())) - if err == ErrName { - i.logger().Printf("ERROR opening database: %s, err=%s", fi.Name(), err) - continue - } else if err != nil { + i.stats.Count("frameN", 1) + } + return nil +} + +// loadMeta reads meta data for the index, if any. +func (i *Index) loadMeta() error { + var pb internal.IndexMeta + + // Read data from meta file. 
+ buf, err := ioutil.ReadFile(filepath.Join(i.path, ".meta")) + if os.IsNotExist(err) { + i.timeQuantum = "" + i.columnLabel = DefaultColumnLabel + return nil + } else if err != nil { + return err + } else { + if err := proto.Unmarshal(buf, &pb); err != nil { return err } - if err := db.Open(); err != nil { - if err == ErrName { - i.logger().Printf("ERROR opening database: %s, err=%s", db.Name(), err) - continue - } - return fmt.Errorf("open db: name=%s, err=%s", db.Name(), err) - } - i.dbs[db.Name()] = db + } - i.Stats.Count("dbN", 1) + // Copy metadata fields. + i.timeQuantum = TimeQuantum(pb.TimeQuantum) + i.columnLabel = pb.ColumnLabel + + return nil +} + +// saveMeta writes meta data for the index. +func (i *Index) saveMeta() error { + // Marshal metadata. + buf, err := proto.Marshal(&internal.IndexMeta{ + TimeQuantum: string(i.timeQuantum), + ColumnLabel: i.columnLabel, + }) + if err != nil { + return err } - // Periodically flush cache. - i.wg.Add(1) - go func() { defer i.wg.Done(); i.monitorCacheFlush() }() + // Write to meta file. + if err := ioutil.WriteFile(filepath.Join(i.path, ".meta"), buf, 0666); err != nil { + return err + } return nil } -// Close closes all open fragments. +// Close closes the index and its frames. func (i *Index) Close() error { - // Notify goroutines of closing and wait for completion. - close(i.closing) - i.wg.Wait() + i.mu.Lock() + defer i.mu.Unlock() - for _, db := range i.dbs { - db.Close() + // Close the attribute store. + if i.columnAttrStore != nil { + i.columnAttrStore.Close() } + + // Close all frames. + for _, f := range i.frames { + f.Close() + } + i.frames = make(map[string]*Frame) + return nil } -// MaxSlices returns MaxSlice map for all databases. -func (i *Index) MaxSlices() map[string]uint64 { - a := make(map[string]uint64) - for _, db := range i.DBs() { - a[db.Name()] = db.MaxSlice() +// MaxSlice returns the max slice in the index according to this node. 
+func (i *Index) MaxSlice() uint64 { + if i == nil { + return 0 } - return a -} + i.mu.Lock() + defer i.mu.Unlock() -// MaxInverseSlices returns MaxInverseSlice map for all databases. -func (i *Index) MaxInverseSlices() map[string]uint64 { - a := make(map[string]uint64) - for _, db := range i.DBs() { - a[db.Name()] = db.MaxInverseSlice() + max := i.remoteMaxSlice + for _, f := range i.frames { + if slice := f.MaxSlice(); slice > max { + max = slice + } } - return a + return max } -// Schema returns schema data for all databases and frames. -func (i *Index) Schema() []*DBInfo { - var a []*DBInfo - for _, db := range i.DBs() { - di := &DBInfo{Name: db.Name()} - for _, frame := range db.Frames() { - fi := &FrameInfo{Name: frame.Name()} - for _, view := range frame.Views() { - fi.Views = append(fi.Views, &ViewInfo{Name: view.Name()}) - } - sort.Sort(viewInfoSlice(fi.Views)) - di.Frames = append(di.Frames, fi) +func (i *Index) SetRemoteMaxSlice(newmax uint64) { + i.mu.Lock() + defer i.mu.Unlock() + i.remoteMaxSlice = newmax +} + +// MaxInverseSlice returns the max inverse slice in the index according to this node. +func (i *Index) MaxInverseSlice() uint64 { + if i == nil { + return 0 + } + i.mu.Lock() + defer i.mu.Unlock() + + max := i.remoteMaxInverseSlice + for _, f := range i.frames { + if slice := f.MaxInverseSlice(); slice > max { + max = slice } - sort.Sort(frameInfoSlice(di.Frames)) - a = append(a, di) } - sort.Sort(dbInfoSlice(a)) - return a + return max } -// DBPath returns the path where a given database is stored. -func (i *Index) DBPath(name string) string { return filepath.Join(i.Path, name) } +func (i *Index) SetRemoteMaxInverseSlice(v uint64) { + i.mu.Lock() + defer i.mu.Unlock() + i.remoteMaxInverseSlice = v +} -// DB returns the database by name. -func (i *Index) DB(name string) *DB { +// TimeQuantum returns the default time quantum for the index. 
+func (i *Index) TimeQuantum() TimeQuantum { i.mu.Lock() defer i.mu.Unlock() - return i.db(name) + return i.timeQuantum } -func (i *Index) db(name string) *DB { return i.dbs[name] } +// SetTimeQuantum sets the default time quantum for the index. +func (i *Index) SetTimeQuantum(q TimeQuantum) error { + i.mu.Lock() + defer i.mu.Unlock() -// DBs returns a list of all databases in the index. -func (i *Index) DBs() []*DB { + // Validate input. + if !q.Valid() { + return ErrInvalidTimeQuantum + } + + // Update value on index. + i.timeQuantum = q + + // Perist meta data to disk. + if err := i.saveMeta(); err != nil { + return err + } + + return nil +} + +// FramePath returns the path to a frame in the index. +func (i *Index) FramePath(name string) string { return filepath.Join(i.path, name) } + +// Frame returns a frame in the index by name. +func (i *Index) Frame(name string) *Frame { i.mu.Lock() defer i.mu.Unlock() + return i.frame(name) +} - a := make([]*DB, 0, len(i.dbs)) - for _, db := range i.dbs { - a = append(a, db) +func (i *Index) frame(name string) *Frame { return i.frames[name] } + +// Frames returns a list of all frames in the index. +func (i *Index) Frames() []*Frame { + i.mu.Lock() + defer i.mu.Unlock() + + a := make([]*Frame, 0, len(i.frames)) + for _, f := range i.frames { + a = append(a, f) } - sort.Sort(dbSlice(a)) + sort.Sort(frameSlice(a)) return a } -// CreateDB creates a database. -// An error is returned if the database already exists. -func (i *Index) CreateDB(name string, opt DBOptions) (*DB, error) { +// CreateFrame creates a frame. +func (i *Index) CreateFrame(name string, opt FrameOptions) (*Frame, error) { i.mu.Lock() defer i.mu.Unlock() - // Ensure db doesn't already exist. - if i.dbs[name] != nil { - return nil, ErrDatabaseExists + // Ensure frame doesn't already exist. 
+ if i.frames[name] != nil { + return nil, ErrFrameExists } - return i.createDB(name, opt) + return i.createFrame(name, opt) } -// CreateDBIfNotExists returns a database by name. -// The database is created if it does not already exist. -func (i *Index) CreateDBIfNotExists(name string, opt DBOptions) (*DB, error) { +// CreateFrameIfNotExists creates a frame with the given options if it doesn't exist. +func (i *Index) CreateFrameIfNotExists(name string, opt FrameOptions) (*Frame, error) { i.mu.Lock() defer i.mu.Unlock() - // Find database in cache first. - if db := i.dbs[name]; db != nil { - return db, nil + // Find frame in cache first. + if f := i.frames[name]; f != nil { + return f, nil } - return i.createDB(name, opt) + return i.createFrame(name, opt) } -func (i *Index) createDB(name string, opt DBOptions) (*DB, error) { +func (i *Index) createFrame(name string, opt FrameOptions) (*Frame, error) { if name == "" { - return nil, errors.New("database name required") + return nil, errors.New("frame name required") + } else if opt.CacheType != "" && !IsValidCacheType(opt.CacheType) { + return nil, ErrInvalidCacheType } - // Return database if it exists. - if db := i.db(name); db != nil { - return db, nil + // Initialize frame. + f, err := i.newFrame(i.FramePath(name), name) + if err != nil { + return nil, err } - // Otherwise create a new database. - db, err := i.newDB(i.DBPath(name), name) - if err != nil { + // Open frame. + if err := f.Open(); err != nil { return nil, err } - if err := db.Open(); err != nil { + // Default the time quantum to what is set on the Index. + if err := f.SetTimeQuantum(i.timeQuantum); err != nil { + f.Close() return nil, err } - // Update options. - db.SetColumnLabel(opt.ColumnLabel) - db.SetTimeQuantum(opt.TimeQuantum) + // Set cache type. + if opt.CacheType == "" { + opt.CacheType = DefaultCacheType + } + f.cacheType = opt.CacheType + + // Set options. 
+ if opt.RowLabel != "" { + f.rowLabel = opt.RowLabel + } + if opt.CacheSize != 0 { + f.cacheSize = opt.CacheSize + } + + f.inverseEnabled = opt.InverseEnabled + if err := f.saveMeta(); err != nil { + f.Close() + return nil, err + } - i.dbs[db.Name()] = db + // Add to index's frame lookup. + i.frames[name] = f - i.Stats.Count("dbN", 1) + i.stats.Count("frameN", 1) - return db, nil + return f, nil } -func (i *Index) newDB(path, name string) (*DB, error) { - db, err := NewDB(path, name) +func (i *Index) newFrame(path, name string) (*Frame, error) { + f, err := NewFrame(path, i.name, name) if err != nil { return nil, err } - db.LogOutput = i.LogOutput - db.Stats = i.Stats.WithTags(fmt.Sprintf("db:%s", db.Name())) - db.broadcaster = i.Broadcaster - return db, nil + f.LogOutput = i.LogOutput + f.stats = i.stats.WithTags(fmt.Sprintf("frame:%s", name)) + f.broadcaster = i.broadcaster + return f, nil } -// DeleteDB removes a database from the index. -func (i *Index) DeleteDB(name string) error { +// DeleteFrame removes a frame from the index. +func (i *Index) DeleteFrame(name string) error { i.mu.Lock() defer i.mu.Unlock() - // Ignore if database doesn't exist. - db := i.db(name) - if db == nil { + // Ignore if frame doesn't exist. + f := i.frame(name) + if f == nil { return nil } - // Close database. - if err := db.Close(); err != nil { + // Close frame. + if err := f.Close(); err != nil { return err } - // Delete database directory. - if err := os.RemoveAll(i.DBPath(name)); err != nil { + // Delete frame directory. + if err := os.RemoveAll(i.FramePath(name)); err != nil { return err } // Remove reference. - delete(i.dbs, name) + delete(i.frames, name) - i.Stats.Count("dbN", -1) + i.stats.Count("frameN", -1) return nil } -// Frame returns the frame for a database and name. -func (i *Index) Frame(db, name string) *Frame { - d := i.DB(db) - if d == nil { - return nil - } - return d.Frame(name) -} - -// View returns the view for a database, frame, and name. 
-func (i *Index) View(db, frame, name string) *View { - f := i.Frame(db, frame) - if f == nil { - return nil - } - return f.View(name) -} - -// Fragment returns the fragment for a database, frame & slice. -func (i *Index) Fragment(db, frame, view string, slice uint64) *Fragment { - v := i.View(db, frame, view) - if v == nil { - return nil - } - return v.Fragment(slice) -} +type indexSlice []*Index -// monitorCacheFlush periodically flushes all fragment caches sequentially. -// This is run in a goroutine. -func (i *Index) monitorCacheFlush() { - ticker := time.NewTicker(i.CacheFlushInterval) - defer ticker.Stop() - - for { - select { - case <-i.closing: - return - case <-ticker.C: - i.flushCaches() - } - } -} +func (p indexSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p indexSlice) Len() int { return len(p) } +func (p indexSlice) Less(i, j int) bool { return p[i].Name() < p[j].Name() } -func (i *Index) flushCaches() { - for _, db := range i.DBs() { - for _, frame := range db.Frames() { - for _, view := range frame.Views() { - for _, fragment := range view.Fragments() { - select { - case <-i.closing: - return - default: - } - - if err := fragment.FlushCache(); err != nil { - i.logger().Printf("error flushing cache: err=%s, path=%s", err, fragment.CachePath()) - } - } - } - } - } +// IndexInfo represents schema information for an index. +type IndexInfo struct { + Name string `json:"name"` + Frames []*FrameInfo `json:"frames"` } -func (i *Index) logger() *log.Logger { return log.New(i.LogOutput, "", log.LstdFlags) } +type indexInfoSlice []*IndexInfo -// IndexSyncer is an active anti-entropy tool that compares the local index -// with a remote index based on block checksums and resolves differences. 
-type IndexSyncer struct { - Index *Index +func (p indexInfoSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p indexInfoSlice) Len() int { return len(p) } +func (p indexInfoSlice) Less(i, j int) bool { return p[i].Name < p[j].Name } - Host string - Cluster *Cluster - - // Signals that the sync should stop. - Closing <-chan struct{} -} - -// Returns true if the syncer has been marked to close. -func (s *IndexSyncer) IsClosing() bool { - select { - case <-s.Closing: - return true - default: - return false - } -} - -// SyncIndex compares the index on host with the local index and resolves differences. -func (s *IndexSyncer) SyncIndex() error { - // Iterate over schema in sorted order. - for _, di := range s.Index.Schema() { - // Verify syncer has not closed. - if s.IsClosing() { - return nil - } - - // Sync database profile attributes. - if err := s.syncDatabase(di.Name); err != nil { - return fmt.Errorf("db sync error: db=%s, err=%s", di.Name, err) - } - - for _, fi := range di.Frames { - // Verify syncer has not closed. - if s.IsClosing() { - return nil +// MergeSchemas combines indexes and frames from a and b into one schema. +func MergeSchemas(a, b []*IndexInfo) []*IndexInfo { + // Generate a map from both schemas. + m := make(map[string]map[string]map[string]struct{}) + for _, idxs := range [][]*IndexInfo{a, b} { + for _, idx := range idxs { + if m[idx.Name] == nil { + m[idx.Name] = make(map[string]map[string]struct{}) } - - // Sync frame bitmap attributes. - if err := s.syncFrame(di.Name, fi.Name); err != nil { - return fmt.Errorf("frame sync error: db=%s, frame=%s, err=%s", di.Name, fi.Name, err) - } - - for _, vi := range fi.Views { - // Verify syncer has not closed. - if s.IsClosing() { - return nil + for _, frame := range idx.Frames { + if m[idx.Name][frame.Name] == nil { + m[idx.Name][frame.Name] = make(map[string]struct{}) } - - for slice := uint64(0); slice <= s.Index.DB(di.Name).MaxSlice(); slice++ { - // Ignore slices that this host doesn't own. 
- if !s.Cluster.OwnsFragment(s.Host, di.Name, slice) { - continue - } - - // Verify syncer has not closed. - if s.IsClosing() { - return nil - } - - // Sync fragment if own it. - if err := s.syncFragment(di.Name, fi.Name, vi.Name, slice); err != nil { - return fmt.Errorf("fragment sync error: db=%s, frame=%s, slice=%d, err=%s", di.Name, fi.Name, slice, err) - } + for _, view := range frame.Views { + m[idx.Name][frame.Name][view.Name] = struct{}{} } } } } - return nil -} - -// syncDatabase synchronizes database attributes with the rest of the cluster. -func (s *IndexSyncer) syncDatabase(db string) error { - // Retrieve database reference. - d := s.Index.DB(db) - if d == nil { - return nil - } - - // Read block checksums. - blks, err := d.ProfileAttrStore().Blocks() - if err != nil { - return err - } - - // Sync with every other host. - for _, node := range Nodes(s.Cluster.Nodes).FilterHost(s.Host) { - client, err := NewClient(node.Host) - if err != nil { - return err - } - - // Retrieve attributes from differing blocks. - // Skip update and recomputation if no attributes have changed. - m, err := client.ProfileAttrDiff(context.Background(), db, blks) - if err != nil { - return err - } else if len(m) == 0 { - continue - } - - // Update local copy. - if err := d.ProfileAttrStore().SetBulkAttrs(m); err != nil { - return err - } - - // Recompute blocks. - blks, err = d.ProfileAttrStore().Blocks() - if err != nil { - return err + // Generate new schema from map. 
+ idxs := make([]*IndexInfo, 0, len(m)) + for idx, frames := range m { + di := &IndexInfo{Name: idx} + for frame, views := range frames { + fi := &FrameInfo{Name: frame} + for view := range views { + fi.Views = append(fi.Views, &ViewInfo{Name: view}) + } + sort.Sort(viewInfoSlice(fi.Views)) + di.Frames = append(di.Frames, fi) } + sort.Sort(frameInfoSlice(di.Frames)) + idxs = append(idxs, di) } + sort.Sort(indexInfoSlice(idxs)) - return nil + return idxs } -// syncFrame synchronizes frame attributes with the rest of the cluster. -func (s *IndexSyncer) syncFrame(db, name string) error { - // Retrieve database reference. - f := s.Index.Frame(db, name) - if f == nil { - return nil +// encodeIndexes converts a into its internal representation. +func encodeIndexes(a []*Index) []*internal.Index { + other := make([]*internal.Index, len(a)) + for i := range a { + other[i] = encodeIndex(a[i]) } + return other +} - // Read block checksums. - blks, err := f.BitmapAttrStore().Blocks() - if err != nil { - return err - } - - // Sync with every other host. - for _, node := range Nodes(s.Cluster.Nodes).FilterHost(s.Host) { - client, err := NewClient(node.Host) - if err != nil { - return err - } - - // Retrieve attributes from differing blocks. - // Skip update and recomputation if no attributes have changed. - m, err := client.BitmapAttrDiff(context.Background(), db, name, blks) - if err == ErrFrameNotFound { - continue // frame not created remotely yet, skip - } else if err != nil { - return err - } else if len(m) == 0 { - continue - } - - // Update local copy. - if err := f.BitmapAttrStore().SetBulkAttrs(m); err != nil { - return err - } - - // Recompute blocks. - blks, err = f.BitmapAttrStore().Blocks() - if err != nil { - return err - } +// encodeIndex converts d into its internal representation. 
+func encodeIndex(d *Index) *internal.Index { + return &internal.Index{ + Name: d.name, + Meta: &internal.IndexMeta{ + ColumnLabel: d.columnLabel, + TimeQuantum: string(d.timeQuantum), + }, + MaxSlice: d.remoteMaxSlice, + Frames: encodeFrames(d.Frames()), } - - return nil } -// syncFragment synchronizes a fragment with the rest of the cluster. -func (s *IndexSyncer) syncFragment(db, frame, view string, slice uint64) error { - // Retrieve local frame. - f := s.Index.Frame(db, frame) - if f == nil { - return ErrFrameNotFound - } +// IndexOptions represents options to set when initializing an index. +type IndexOptions struct { + ColumnLabel string `json:"columnLabel,omitempty"` + TimeQuantum TimeQuantum `json:"timeQuantum,omitempty"` +} - // Ensure view exists locally. - v, err := f.CreateViewIfNotExists(view) - if err != nil { - return err +// Encode converts o into its internal representation. +func (o *IndexOptions) Encode() *internal.IndexMeta { + return &internal.IndexMeta{ + ColumnLabel: o.ColumnLabel, + TimeQuantum: string(o.TimeQuantum), } +} - // Ensure fragment exists locally. - frag, err := v.CreateFragmentIfNotExists(slice) - if err != nil { - return err +// hasTime returns true if a contains a non-nil time. +func hasTime(a []*time.Time) bool { + for _, t := range a { + if t != nil { + return true + } } + return false +} - // Sync fragments together. 
- fs := FragmentSyncer{ - Fragment: frag, - Host: s.Host, - Cluster: s.Cluster, - Closing: s.Closing, - } - if err := fs.SyncFragment(); err != nil { - return err - } +type importKey struct { + View string + Slice uint64 +} - return nil +type importData struct { + RowIDs []uint64 + ColumnIDs []uint64 } diff --git a/index_test.go b/index_test.go index 4a1be4cf2..67b5b8b2c 100644 --- a/index_test.go +++ b/index_test.go @@ -1,234 +1,179 @@ package pilosa_test import ( - "bytes" - "context" "io/ioutil" "os" - "reflect" "testing" "github.com/pilosa/pilosa" - "github.com/pilosa/pilosa/pql" ) -// Ensure index can delete a database and its underlying files. -func TestIndex_DeleteDB(t *testing.T) { - idx := MustOpenIndex() - defer idx.Close() +// Ensure index can open and retrieve a frame. +func TestIndex_CreateFrameIfNotExists(t *testing.T) { + index := MustOpenIndex() + defer index.Close() - // Write bits to separate databases. - f0 := idx.MustCreateFragmentIfNotExists("d0", "f", pilosa.ViewStandard, 0) - if _, err := f0.SetBit(100, 200); err != nil { - t.Fatal(err) - } - f1 := idx.MustCreateFragmentIfNotExists("d1", "f", pilosa.ViewStandard, 0) - if _, err := f1.SetBit(100, 200); err != nil { - t.Fatal(err) - } - - // Ensure d0 exists. - if _, err := os.Stat(idx.DBPath("d0")); err != nil { + // Create frame. + f, err := index.CreateFrameIfNotExists("f", pilosa.FrameOptions{}) + if err != nil { t.Fatal(err) + } else if f == nil { + t.Fatal("expected frame") } - // Delete d0. - if err := idx.DeleteDB("d0"); err != nil { + // Retrieve existing frame. + other, err := index.CreateFrameIfNotExists("f", pilosa.FrameOptions{}) + if err != nil { t.Fatal(err) + } else if f.Frame != other.Frame { + t.Fatal("frame mismatch") } - // Ensure d0 files are removed & d1 still exists. 
- if _, err := os.Stat(idx.DBPath("d0")); !os.IsNotExist(err) { - t.Fatal("expected d0 file deletion") - } else if _, err := os.Stat(idx.DBPath("d1")); err != nil { - t.Fatal("expected d1 files to still exist", err) + if f.Frame != index.Frame("f") { + t.Fatal("frame mismatch") } } -// Ensure index can sync with a remote index. -func TestIndexSyncer_SyncIndex(t *testing.T) { - cluster := NewCluster(2) - - // Create a local index. - idx0 := MustOpenIndex() - defer idx0.Close() - - // Create a remote index wrapped by an HTTP - idx1 := MustOpenIndex() - defer idx1.Close() - s := NewServer() - defer s.Close() - s.Handler.Index = idx1.Index - s.Handler.Executor.ExecuteFn = func(ctx context.Context, db string, query *pql.Query, slices []uint64, opt *pilosa.ExecOptions) ([]interface{}, error) { - e := pilosa.NewExecutor() - e.Index = idx1.Index - e.Host = cluster.Nodes[1].Host - e.Cluster = cluster - return e.Execute(ctx, db, query, slices, opt) - } - - // Mock 2-node, fully replicated cluster. - cluster.ReplicaN = 2 - cluster.Nodes[0].Host = "localhost:0" - cluster.Nodes[1].Host = MustParseURLHost(s.URL) - - // Create frames on nodes. - for _, idx := range []*Index{idx0, idx1} { - idx.MustCreateFrameIfNotExists("d", "f") - idx.MustCreateFrameIfNotExists("d", "f0") - idx.MustCreateFrameIfNotExists("y", "z") - } - - // Set data on the local index. - f := idx0.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0) - if _, err := f.SetBit(0, 10); err != nil { - t.Fatal(err) - } else if _, err := f.SetBit(2, 20); err != nil { - t.Fatal(err) - } else if _, err := f.SetBit(120, 10); err != nil { - t.Fatal(err) - } else if _, err := f.SetBit(200, 4); err != nil { +// Ensure index defaults the time quantum on new frames. +func TestIndex_CreateFrame_TimeQuantum(t *testing.T) { + index := MustOpenIndex() + defer index.Close() + + // Set index time quantum. 
+ if err := index.SetTimeQuantum(pilosa.TimeQuantum("YM")); err != nil { t.Fatal(err) } - f = idx0.MustCreateFragmentIfNotExists("d", "f0", pilosa.ViewStandard, 1) - if _, err := f.SetBit(9, SliceWidth+5); err != nil { + // Create frame. + f, err := index.CreateFrame("f", pilosa.FrameOptions{}) + if err != nil { t.Fatal(err) + } else if q := f.TimeQuantum(); q != pilosa.TimeQuantum("YM") { + t.Fatalf("unexpected frame time quantum: %s", q) } +} - idx0.MustCreateFragmentIfNotExists("y", "z", pilosa.ViewStandard, 0) +// Ensure index can delete a frame. +func TestIndex_DeleteFrame(t *testing.T) { + index := MustOpenIndex() + defer index.Close() - // Set data on the remote index. - f = idx1.MustCreateFragmentIfNotExists("d", "f", pilosa.ViewStandard, 0) - if _, err := f.SetBit(0, 4000); err != nil { - t.Fatal(err) - } else if _, err := f.SetBit(3, 10); err != nil { - t.Fatal(err) - } else if _, err := f.SetBit(120, 10); err != nil { + // Create frame. + if _, err := index.CreateFrameIfNotExists("f", pilosa.FrameOptions{}); err != nil { t.Fatal(err) } - f = idx1.MustCreateFragmentIfNotExists("y", "z", pilosa.ViewStandard, 3) - if _, err := f.SetBit(10, (3*SliceWidth)+4); err != nil { - t.Fatal(err) - } else if _, err := f.SetBit(10, (3*SliceWidth)+5); err != nil { - t.Fatal(err) - } else if _, err := f.SetBit(10, (3*SliceWidth)+7); err != nil { + // Delete frame & verify it's gone. + if err := index.DeleteFrame("f"); err != nil { t.Fatal(err) + } else if index.Frame("f") != nil { + t.Fatal("expected nil frame") } - // Set highest slice. - idx0.DB("d").SetRemoteMaxSlice(1) - idx0.DB("y").SetRemoteMaxSlice(3) - - // Set up syncer. - syncer := pilosa.IndexSyncer{ - Index: idx0.Index, - Host: cluster.Nodes[0].Host, - Cluster: cluster, + // Delete again to make sure it doesn't error. + if err := index.DeleteFrame("f"); err != nil { + t.Fatal(err) } +} - if err := syncer.SyncIndex(); err != nil { +// Ensure index can set the default time quantum. 
+func TestIndex_SetTimeQuantum(t *testing.T) { + index := MustOpenIndex() + defer index.Close() + + // Set & retrieve time quantum. + if err := index.SetTimeQuantum(pilosa.TimeQuantum("YMDH")); err != nil { t.Fatal(err) + } else if q := index.TimeQuantum(); q != pilosa.TimeQuantum("YMDH") { + t.Fatalf("unexpected quantum: %s", q) } - // Verify data is the same on both nodes. - for i, idx := range []*Index{idx0, idx1} { - f := idx.Fragment("d", "f", pilosa.ViewStandard, 0) - if a := f.Bitmap(0).Bits(); !reflect.DeepEqual(a, []uint64{10, 4000}) { - t.Fatalf("unexpected bits(%d/0): %+v", i, a) - } else if a := f.Bitmap(2).Bits(); !reflect.DeepEqual(a, []uint64{20}) { - t.Fatalf("unexpected bits(%d/2): %+v", i, a) - } else if a := f.Bitmap(3).Bits(); !reflect.DeepEqual(a, []uint64{10}) { - t.Fatalf("unexpected bits(%d/3): %+v", i, a) - } else if a := f.Bitmap(120).Bits(); !reflect.DeepEqual(a, []uint64{10}) { - t.Fatalf("unexpected bits(%d/120): %+v", i, a) - } else if a := f.Bitmap(200).Bits(); !reflect.DeepEqual(a, []uint64{4}) { - t.Fatalf("unexpected bits(%d/200): %+v", i, a) - } - - f = idx.Fragment("d", "f0", pilosa.ViewStandard, 1) - a := f.Bitmap(9).Bits() - if !reflect.DeepEqual(a, []uint64{SliceWidth + 5}) { - t.Fatalf("unexpected bits(%d/d/f0): %+v", i, a) - } - if a := f.Bitmap(9).Bits(); !reflect.DeepEqual(a, []uint64{SliceWidth + 5}) { - t.Fatalf("unexpected bits(%d/d/f0): %+v", i, a) - } - f = idx.Fragment("y", "z", pilosa.ViewStandard, 3) - if a := f.Bitmap(10).Bits(); !reflect.DeepEqual(a, []uint64{(3 * SliceWidth) + 4, (3 * SliceWidth) + 5, (3 * SliceWidth) + 7}) { - t.Fatalf("unexpected bits(%d/y/z): %+v", i, a) - } + // Reload index and verify that it is persisted. + if err := index.Reopen(); err != nil { + t.Fatal(err) + } else if q := index.TimeQuantum(); q != pilosa.TimeQuantum("YMDH") { + t.Fatalf("unexpected quantum (reopen): %s", q) } } -// Index is a test wrapper for pilosa.Index. +// Index represents a test wrapper for pilosa.Index. 
type Index struct { *pilosa.Index - LogOutput bytes.Buffer } -// NewIndex returns a new instance of Index with a temporary path. +// NewIndex returns a new instance of Index. func NewIndex() *Index { - path, err := ioutil.TempDir("", "pilosa-") + path, err := ioutil.TempDir("", "pilosa-index-") if err != nil { panic(err) } - - i := &Index{Index: pilosa.NewIndex()} - i.Path = path - i.Index.LogOutput = &i.LogOutput - - return i + index, err := pilosa.NewIndex(path, "i") + if err != nil { + panic(err) + } + return &Index{Index: index} } -// MustOpenIndex creates and opens an index at a temporary path. Panic on error. +// MustOpenIndex returns a new, opened index at a temporary path. Panic on error. func MustOpenIndex() *Index { - i := NewIndex() - if err := i.Open(); err != nil { + index := NewIndex() + if err := index.Open(); err != nil { panic(err) } - return i + return index } -// Close closes the index and removes all underlying data. +// Close closes the index and removes the underlying data. func (i *Index) Close() error { - defer os.RemoveAll(i.Path) + defer os.RemoveAll(i.Path()) return i.Index.Close() } -// MustCreateDBIfNotExists returns a given db. Panic on error. -func (i *Index) MustCreateDBIfNotExists(db string, opt pilosa.DBOptions) *DB { - d, err := i.Index.CreateDBIfNotExists(db, opt) +// Reopen closes the index and reopens it. +func (i *Index) Reopen() error { + var err error + if err := i.Index.Close(); err != nil { + return err + } + + path, name := i.Path(), i.Name() + i.Index, err = pilosa.NewIndex(path, name) if err != nil { - panic(err) + return err } - return &DB{DB: d} + + if err := i.Open(); err != nil { + return err + } + return nil } -// MustCreateFrameIfNotExists returns a given frame. Panic on error. 
-func (i *Index) MustCreateFrameIfNotExists(db, frame string) *Frame { - f, err := i.MustCreateDBIfNotExists(db, pilosa.DBOptions{}).CreateFrameIfNotExists(frame, pilosa.FrameOptions{}) +// CreateFrame creates a frame with the given options. +func (i *Index) CreateFrame(name string, opt pilosa.FrameOptions) (*Frame, error) { + f, err := i.Index.CreateFrame(name, opt) if err != nil { - panic(err) + return nil, err } - return f + return &Frame{Frame: f}, nil } -// MustCreateFragmentIfNotExists returns a given fragment. Panic on error. -func (i *Index) MustCreateFragmentIfNotExists(db, frame, view string, slice uint64) *Fragment { - d := i.MustCreateDBIfNotExists(db, pilosa.DBOptions{}) - f, err := d.CreateFrameIfNotExists(frame, pilosa.FrameOptions{}) +// CreateFrameIfNotExists creates a frame with the given options if it doesn't exist. +func (i *Index) CreateFrameIfNotExists(name string, opt pilosa.FrameOptions) (*Frame, error) { + f, err := i.Index.CreateFrameIfNotExists(name, opt) if err != nil { - panic(err) + return nil, err } - v, err := f.CreateViewIfNotExists(view) + return &Frame{Frame: f}, nil +} + +// Ensure index can delete a frame. 
+func TestIndex_InvalidName(t *testing.T) { + path, err := ioutil.TempDir("", "pilosa-index-") if err != nil { panic(err) } - frag, err := v.CreateFragmentIfNotExists(slice) - if err != nil { - panic(err) + index, err := pilosa.NewIndex(path, "ABC") + if index != nil { + t.Fatalf("unexpected index name %s", index) } - return &Fragment{Fragment: frag} } diff --git a/internal/private.pb.go b/internal/private.pb.go index 0423cfa08..a41df4788 100644 --- a/internal/private.pb.go +++ b/internal/private.pb.go @@ -9,7 +9,7 @@ private.proto It has these top-level messages: - DBMeta + IndexMeta FrameMeta ImportResponse BlockDataRequest @@ -17,13 +17,14 @@ Cache MaxSlicesResponse CreateSliceMessage - DeleteDBMessage - CreateDBMessage + DeleteIndexMessage + CreateIndexMessage CreateFrameMessage DeleteFrameMessage Frame - DB - NodeState + Index + NodeStatus + ClusterStatus */ package internal @@ -44,15 +45,15 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package -type DBMeta struct { +type IndexMeta struct { ColumnLabel string `protobuf:"bytes,1,opt,name=ColumnLabel,proto3" json:"ColumnLabel,omitempty"` TimeQuantum string `protobuf:"bytes,2,opt,name=TimeQuantum,proto3" json:"TimeQuantum,omitempty"` } -func (m *DBMeta) Reset() { *m = DBMeta{} } -func (m *DBMeta) String() string { return proto.CompactTextString(m) } -func (*DBMeta) ProtoMessage() {} -func (*DBMeta) Descriptor() ([]byte, []int) { return fileDescriptorPrivate, []int{0} } +func (m *IndexMeta) Reset() { *m = IndexMeta{} } +func (m *IndexMeta) String() string { return proto.CompactTextString(m) } +func (*IndexMeta) ProtoMessage() {} +func (*IndexMeta) Descriptor() ([]byte, []int) { return fileDescriptorPrivate, []int{0} } type FrameMeta struct { RowLabel string `protobuf:"bytes,1,opt,name=RowLabel,proto3" json:"RowLabel,omitempty"` @@ -77,7 +78,7 @@ func (*ImportResponse) ProtoMessage() {} func (*ImportResponse) Descriptor() ([]byte, []int) 
{ return fileDescriptorPrivate, []int{2} } type BlockDataRequest struct { - DB string `protobuf:"bytes,1,opt,name=DB,proto3" json:"DB,omitempty"` + Index string `protobuf:"bytes,1,opt,name=Index,proto3" json:"Index,omitempty"` Frame string `protobuf:"bytes,2,opt,name=Frame,proto3" json:"Frame,omitempty"` View string `protobuf:"bytes,5,opt,name=View,proto3" json:"View,omitempty"` Slice uint64 `protobuf:"varint,4,opt,name=Slice,proto3" json:"Slice,omitempty"` @@ -90,8 +91,8 @@ func (*BlockDataRequest) ProtoMessage() {} func (*BlockDataRequest) Descriptor() ([]byte, []int) { return fileDescriptorPrivate, []int{3} } type BlockDataResponse struct { - BitmapIDs []uint64 `protobuf:"varint,1,rep,packed,name=BitmapIDs" json:"BitmapIDs,omitempty"` - ProfileIDs []uint64 `protobuf:"varint,2,rep,packed,name=ProfileIDs" json:"ProfileIDs,omitempty"` + RowIDs []uint64 `protobuf:"varint,1,rep,packed,name=RowIDs" json:"RowIDs,omitempty"` + ColumnIDs []uint64 `protobuf:"varint,2,rep,packed,name=ColumnIDs" json:"ColumnIDs,omitempty"` } func (m *BlockDataResponse) Reset() { *m = BlockDataResponse{} } @@ -100,7 +101,7 @@ func (*BlockDataResponse) ProtoMessage() {} func (*BlockDataResponse) Descriptor() ([]byte, []int) { return fileDescriptorPrivate, []int{4} } type Cache struct { - BitmapIDs []uint64 `protobuf:"varint,1,rep,packed,name=BitmapIDs" json:"BitmapIDs,omitempty"` + IDs []uint64 `protobuf:"varint,1,rep,packed,name=IDs" json:"IDs,omitempty"` } func (m *Cache) Reset() { *m = Cache{} } @@ -125,7 +126,7 @@ func (m *MaxSlicesResponse) GetMaxSlices() map[string]uint64 { } type CreateSliceMessage struct { - DB string `protobuf:"bytes,1,opt,name=DB,proto3" json:"DB,omitempty"` + Index string `protobuf:"bytes,1,opt,name=Index,proto3" json:"Index,omitempty"` Slice uint64 `protobuf:"varint,2,opt,name=Slice,proto3" json:"Slice,omitempty"` } @@ -134,26 +135,26 @@ func (m *CreateSliceMessage) String() string { return proto.CompactTe func (*CreateSliceMessage) ProtoMessage() {} func 
(*CreateSliceMessage) Descriptor() ([]byte, []int) { return fileDescriptorPrivate, []int{7} } -type DeleteDBMessage struct { - DB string `protobuf:"bytes,1,opt,name=DB,proto3" json:"DB,omitempty"` +type DeleteIndexMessage struct { + Index string `protobuf:"bytes,1,opt,name=Index,proto3" json:"Index,omitempty"` } -func (m *DeleteDBMessage) Reset() { *m = DeleteDBMessage{} } -func (m *DeleteDBMessage) String() string { return proto.CompactTextString(m) } -func (*DeleteDBMessage) ProtoMessage() {} -func (*DeleteDBMessage) Descriptor() ([]byte, []int) { return fileDescriptorPrivate, []int{8} } +func (m *DeleteIndexMessage) Reset() { *m = DeleteIndexMessage{} } +func (m *DeleteIndexMessage) String() string { return proto.CompactTextString(m) } +func (*DeleteIndexMessage) ProtoMessage() {} +func (*DeleteIndexMessage) Descriptor() ([]byte, []int) { return fileDescriptorPrivate, []int{8} } -type CreateDBMessage struct { - DB string `protobuf:"bytes,1,opt,name=DB,proto3" json:"DB,omitempty"` - Meta *DBMeta `protobuf:"bytes,2,opt,name=Meta" json:"Meta,omitempty"` +type CreateIndexMessage struct { + Index string `protobuf:"bytes,1,opt,name=Index,proto3" json:"Index,omitempty"` + Meta *IndexMeta `protobuf:"bytes,2,opt,name=Meta" json:"Meta,omitempty"` } -func (m *CreateDBMessage) Reset() { *m = CreateDBMessage{} } -func (m *CreateDBMessage) String() string { return proto.CompactTextString(m) } -func (*CreateDBMessage) ProtoMessage() {} -func (*CreateDBMessage) Descriptor() ([]byte, []int) { return fileDescriptorPrivate, []int{9} } +func (m *CreateIndexMessage) Reset() { *m = CreateIndexMessage{} } +func (m *CreateIndexMessage) String() string { return proto.CompactTextString(m) } +func (*CreateIndexMessage) ProtoMessage() {} +func (*CreateIndexMessage) Descriptor() ([]byte, []int) { return fileDescriptorPrivate, []int{9} } -func (m *CreateDBMessage) GetMeta() *DBMeta { +func (m *CreateIndexMessage) GetMeta() *IndexMeta { if m != nil { return m.Meta } @@ -161,7 +162,7 @@ func 
(m *CreateDBMessage) GetMeta() *DBMeta { } type CreateFrameMessage struct { - DB string `protobuf:"bytes,1,opt,name=DB,proto3" json:"DB,omitempty"` + Index string `protobuf:"bytes,1,opt,name=Index,proto3" json:"Index,omitempty"` Frame string `protobuf:"bytes,2,opt,name=Frame,proto3" json:"Frame,omitempty"` Meta *FrameMeta `protobuf:"bytes,3,opt,name=Meta" json:"Meta,omitempty"` } @@ -179,7 +180,7 @@ func (m *CreateFrameMessage) GetMeta() *FrameMeta { } type DeleteFrameMessage struct { - DB string `protobuf:"bytes,1,opt,name=DB,proto3" json:"DB,omitempty"` + Index string `protobuf:"bytes,1,opt,name=Index,proto3" json:"Index,omitempty"` Frame string `protobuf:"bytes,2,opt,name=Frame,proto3" json:"Frame,omitempty"` } @@ -205,52 +206,68 @@ func (m *Frame) GetMeta() *FrameMeta { return nil } -type DB struct { - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` - Meta *DBMeta `protobuf:"bytes,2,opt,name=Meta" json:"Meta,omitempty"` - MaxSlice uint64 `protobuf:"varint,3,opt,name=MaxSlice,proto3" json:"MaxSlice,omitempty"` - Frames []*Frame `protobuf:"bytes,4,rep,name=Frames" json:"Frames,omitempty"` +type Index struct { + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + Meta *IndexMeta `protobuf:"bytes,2,opt,name=Meta" json:"Meta,omitempty"` + MaxSlice uint64 `protobuf:"varint,3,opt,name=MaxSlice,proto3" json:"MaxSlice,omitempty"` + Frames []*Frame `protobuf:"bytes,4,rep,name=Frames" json:"Frames,omitempty"` } -func (m *DB) Reset() { *m = DB{} } -func (m *DB) String() string { return proto.CompactTextString(m) } -func (*DB) ProtoMessage() {} -func (*DB) Descriptor() ([]byte, []int) { return fileDescriptorPrivate, []int{13} } +func (m *Index) Reset() { *m = Index{} } +func (m *Index) String() string { return proto.CompactTextString(m) } +func (*Index) ProtoMessage() {} +func (*Index) Descriptor() ([]byte, []int) { return fileDescriptorPrivate, []int{13} } -func (m *DB) GetMeta() *DBMeta { +func (m *Index) GetMeta() 
*IndexMeta { if m != nil { return m.Meta } return nil } -func (m *DB) GetFrames() []*Frame { +func (m *Index) GetFrames() []*Frame { if m != nil { return m.Frames } return nil } -type NodeState struct { - Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` - State string `protobuf:"bytes,2,opt,name=State,proto3" json:"State,omitempty"` - DBs []*DB `protobuf:"bytes,3,rep,name=DBs" json:"DBs,omitempty"` +type NodeStatus struct { + Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` + State string `protobuf:"bytes,2,opt,name=State,proto3" json:"State,omitempty"` + Indexes []*Index `protobuf:"bytes,3,rep,name=Indexes" json:"Indexes,omitempty"` } -func (m *NodeState) Reset() { *m = NodeState{} } -func (m *NodeState) String() string { return proto.CompactTextString(m) } -func (*NodeState) ProtoMessage() {} -func (*NodeState) Descriptor() ([]byte, []int) { return fileDescriptorPrivate, []int{14} } +func (m *NodeStatus) Reset() { *m = NodeStatus{} } +func (m *NodeStatus) String() string { return proto.CompactTextString(m) } +func (*NodeStatus) ProtoMessage() {} +func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorPrivate, []int{14} } -func (m *NodeState) GetDBs() []*DB { +func (m *NodeStatus) GetIndexes() []*Index { if m != nil { - return m.DBs + return m.Indexes + } + return nil +} + +type ClusterStatus struct { + Nodes []*NodeStatus `protobuf:"bytes,1,rep,name=Nodes" json:"Nodes,omitempty"` +} + +func (m *ClusterStatus) Reset() { *m = ClusterStatus{} } +func (m *ClusterStatus) String() string { return proto.CompactTextString(m) } +func (*ClusterStatus) ProtoMessage() {} +func (*ClusterStatus) Descriptor() ([]byte, []int) { return fileDescriptorPrivate, []int{15} } + +func (m *ClusterStatus) GetNodes() []*NodeStatus { + if m != nil { + return m.Nodes } return nil } func init() { - proto.RegisterType((*DBMeta)(nil), "internal.DBMeta") + proto.RegisterType((*IndexMeta)(nil), "internal.IndexMeta") 
proto.RegisterType((*FrameMeta)(nil), "internal.FrameMeta") proto.RegisterType((*ImportResponse)(nil), "internal.ImportResponse") proto.RegisterType((*BlockDataRequest)(nil), "internal.BlockDataRequest") @@ -258,15 +275,16 @@ func init() { proto.RegisterType((*Cache)(nil), "internal.Cache") proto.RegisterType((*MaxSlicesResponse)(nil), "internal.MaxSlicesResponse") proto.RegisterType((*CreateSliceMessage)(nil), "internal.CreateSliceMessage") - proto.RegisterType((*DeleteDBMessage)(nil), "internal.DeleteDBMessage") - proto.RegisterType((*CreateDBMessage)(nil), "internal.CreateDBMessage") + proto.RegisterType((*DeleteIndexMessage)(nil), "internal.DeleteIndexMessage") + proto.RegisterType((*CreateIndexMessage)(nil), "internal.CreateIndexMessage") proto.RegisterType((*CreateFrameMessage)(nil), "internal.CreateFrameMessage") proto.RegisterType((*DeleteFrameMessage)(nil), "internal.DeleteFrameMessage") proto.RegisterType((*Frame)(nil), "internal.Frame") - proto.RegisterType((*DB)(nil), "internal.DB") - proto.RegisterType((*NodeState)(nil), "internal.NodeState") + proto.RegisterType((*Index)(nil), "internal.Index") + proto.RegisterType((*NodeStatus)(nil), "internal.NodeStatus") + proto.RegisterType((*ClusterStatus)(nil), "internal.ClusterStatus") } -func (m *DBMeta) Marshal() (dAtA []byte, err error) { +func (m *IndexMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -276,7 +294,7 @@ func (m *DBMeta) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *DBMeta) MarshalTo(dAtA []byte) (int, error) { +func (m *IndexMeta) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -386,11 +404,11 @@ func (m *BlockDataRequest) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.DB) > 0 { + if len(m.Index) > 0 { dAtA[i] = 0xa i++ - i = encodeVarintPrivate(dAtA, i, uint64(len(m.DB))) - i += copy(dAtA[i:], m.DB) + i = encodeVarintPrivate(dAtA, i, uint64(len(m.Index))) + i 
+= copy(dAtA[i:], m.Index) } if len(m.Frame) > 0 { dAtA[i] = 0x12 @@ -432,10 +450,10 @@ func (m *BlockDataResponse) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.BitmapIDs) > 0 { - dAtA2 := make([]byte, len(m.BitmapIDs)*10) + if len(m.RowIDs) > 0 { + dAtA2 := make([]byte, len(m.RowIDs)*10) var j1 int - for _, num := range m.BitmapIDs { + for _, num := range m.RowIDs { for num >= 1<<7 { dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 @@ -449,10 +467,10 @@ func (m *BlockDataResponse) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintPrivate(dAtA, i, uint64(j1)) i += copy(dAtA[i:], dAtA2[:j1]) } - if len(m.ProfileIDs) > 0 { - dAtA4 := make([]byte, len(m.ProfileIDs)*10) + if len(m.ColumnIDs) > 0 { + dAtA4 := make([]byte, len(m.ColumnIDs)*10) var j3 int - for _, num := range m.ProfileIDs { + for _, num := range m.ColumnIDs { for num >= 1<<7 { dAtA4[j3] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 @@ -484,10 +502,10 @@ func (m *Cache) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.BitmapIDs) > 0 { - dAtA6 := make([]byte, len(m.BitmapIDs)*10) + if len(m.IDs) > 0 { + dAtA6 := make([]byte, len(m.IDs)*10) var j5 int - for _, num := range m.BitmapIDs { + for _, num := range m.IDs { for num >= 1<<7 { dAtA6[j5] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 @@ -553,11 +571,11 @@ func (m *CreateSliceMessage) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.DB) > 0 { + if len(m.Index) > 0 { dAtA[i] = 0xa i++ - i = encodeVarintPrivate(dAtA, i, uint64(len(m.DB))) - i += copy(dAtA[i:], m.DB) + i = encodeVarintPrivate(dAtA, i, uint64(len(m.Index))) + i += copy(dAtA[i:], m.Index) } if m.Slice != 0 { dAtA[i] = 0x10 @@ -567,7 +585,7 @@ func (m *CreateSliceMessage) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *DeleteDBMessage) Marshal() (dAtA []byte, err error) { +func (m *DeleteIndexMessage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := 
m.MarshalTo(dAtA) @@ -577,21 +595,21 @@ func (m *DeleteDBMessage) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *DeleteDBMessage) MarshalTo(dAtA []byte) (int, error) { +func (m *DeleteIndexMessage) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if len(m.DB) > 0 { + if len(m.Index) > 0 { dAtA[i] = 0xa i++ - i = encodeVarintPrivate(dAtA, i, uint64(len(m.DB))) - i += copy(dAtA[i:], m.DB) + i = encodeVarintPrivate(dAtA, i, uint64(len(m.Index))) + i += copy(dAtA[i:], m.Index) } return i, nil } -func (m *CreateDBMessage) Marshal() (dAtA []byte, err error) { +func (m *CreateIndexMessage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -601,16 +619,16 @@ func (m *CreateDBMessage) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *CreateDBMessage) MarshalTo(dAtA []byte) (int, error) { +func (m *CreateIndexMessage) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if len(m.DB) > 0 { + if len(m.Index) > 0 { dAtA[i] = 0xa i++ - i = encodeVarintPrivate(dAtA, i, uint64(len(m.DB))) - i += copy(dAtA[i:], m.DB) + i = encodeVarintPrivate(dAtA, i, uint64(len(m.Index))) + i += copy(dAtA[i:], m.Index) } if m.Meta != nil { dAtA[i] = 0x12 @@ -640,11 +658,11 @@ func (m *CreateFrameMessage) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.DB) > 0 { + if len(m.Index) > 0 { dAtA[i] = 0xa i++ - i = encodeVarintPrivate(dAtA, i, uint64(len(m.DB))) - i += copy(dAtA[i:], m.DB) + i = encodeVarintPrivate(dAtA, i, uint64(len(m.Index))) + i += copy(dAtA[i:], m.Index) } if len(m.Frame) > 0 { dAtA[i] = 0x12 @@ -680,11 +698,11 @@ func (m *DeleteFrameMessage) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.DB) > 0 { + if len(m.Index) > 0 { dAtA[i] = 0xa i++ - i = encodeVarintPrivate(dAtA, i, uint64(len(m.DB))) - i += copy(dAtA[i:], m.DB) + i = encodeVarintPrivate(dAtA, i, uint64(len(m.Index))) + i += 
copy(dAtA[i:], m.Index) } if len(m.Frame) > 0 { dAtA[i] = 0x12 @@ -729,7 +747,7 @@ func (m *Frame) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *DB) Marshal() (dAtA []byte, err error) { +func (m *Index) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -739,7 +757,7 @@ func (m *DB) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *DB) MarshalTo(dAtA []byte) (int, error) { +func (m *Index) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -780,7 +798,7 @@ func (m *DB) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *NodeState) Marshal() (dAtA []byte, err error) { +func (m *NodeStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -790,7 +808,7 @@ func (m *NodeState) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *NodeState) MarshalTo(dAtA []byte) (int, error) { +func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -807,8 +825,8 @@ func (m *NodeState) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintPrivate(dAtA, i, uint64(len(m.State))) i += copy(dAtA[i:], m.State) } - if len(m.DBs) > 0 { - for _, msg := range m.DBs { + if len(m.Indexes) > 0 { + for _, msg := range m.Indexes { dAtA[i] = 0x1a i++ i = encodeVarintPrivate(dAtA, i, uint64(msg.Size())) @@ -822,6 +840,36 @@ func (m *NodeState) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *ClusterStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Nodes) > 0 { + for _, msg := range m.Nodes { + dAtA[i] = 0xa + i++ + i = encodeVarintPrivate(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + 
if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + func encodeFixed64Private(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) dAtA[offset+1] = uint8(v >> 8) @@ -849,7 +897,7 @@ func encodeVarintPrivate(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } -func (m *DBMeta) Size() (n int) { +func (m *IndexMeta) Size() (n int) { var l int _ = l l = len(m.ColumnLabel) @@ -900,7 +948,7 @@ func (m *ImportResponse) Size() (n int) { func (m *BlockDataRequest) Size() (n int) { var l int _ = l - l = len(m.DB) + l = len(m.Index) if l > 0 { n += 1 + l + sovPrivate(uint64(l)) } @@ -924,16 +972,16 @@ func (m *BlockDataRequest) Size() (n int) { func (m *BlockDataResponse) Size() (n int) { var l int _ = l - if len(m.BitmapIDs) > 0 { + if len(m.RowIDs) > 0 { l = 0 - for _, e := range m.BitmapIDs { + for _, e := range m.RowIDs { l += sovPrivate(uint64(e)) } n += 1 + sovPrivate(uint64(l)) + l } - if len(m.ProfileIDs) > 0 { + if len(m.ColumnIDs) > 0 { l = 0 - for _, e := range m.ProfileIDs { + for _, e := range m.ColumnIDs { l += sovPrivate(uint64(e)) } n += 1 + sovPrivate(uint64(l)) + l @@ -944,9 +992,9 @@ func (m *BlockDataResponse) Size() (n int) { func (m *Cache) Size() (n int) { var l int _ = l - if len(m.BitmapIDs) > 0 { + if len(m.IDs) > 0 { l = 0 - for _, e := range m.BitmapIDs { + for _, e := range m.IDs { l += sovPrivate(uint64(e)) } n += 1 + sovPrivate(uint64(l)) + l @@ -971,7 +1019,7 @@ func (m *MaxSlicesResponse) Size() (n int) { func (m *CreateSliceMessage) Size() (n int) { var l int _ = l - l = len(m.DB) + l = len(m.Index) if l > 0 { n += 1 + l + sovPrivate(uint64(l)) } @@ -981,20 +1029,20 @@ func (m *CreateSliceMessage) Size() (n int) { return n } -func (m *DeleteDBMessage) Size() (n int) { +func (m *DeleteIndexMessage) Size() (n int) { var l int _ = l - l = len(m.DB) + l = len(m.Index) if l > 0 { n += 1 + l + sovPrivate(uint64(l)) } return n } -func (m *CreateDBMessage) Size() (n int) { +func (m 
*CreateIndexMessage) Size() (n int) { var l int _ = l - l = len(m.DB) + l = len(m.Index) if l > 0 { n += 1 + l + sovPrivate(uint64(l)) } @@ -1008,7 +1056,7 @@ func (m *CreateDBMessage) Size() (n int) { func (m *CreateFrameMessage) Size() (n int) { var l int _ = l - l = len(m.DB) + l = len(m.Index) if l > 0 { n += 1 + l + sovPrivate(uint64(l)) } @@ -1026,7 +1074,7 @@ func (m *CreateFrameMessage) Size() (n int) { func (m *DeleteFrameMessage) Size() (n int) { var l int _ = l - l = len(m.DB) + l = len(m.Index) if l > 0 { n += 1 + l + sovPrivate(uint64(l)) } @@ -1051,7 +1099,7 @@ func (m *Frame) Size() (n int) { return n } -func (m *DB) Size() (n int) { +func (m *Index) Size() (n int) { var l int _ = l l = len(m.Name) @@ -1074,7 +1122,7 @@ func (m *DB) Size() (n int) { return n } -func (m *NodeState) Size() (n int) { +func (m *NodeStatus) Size() (n int) { var l int _ = l l = len(m.Host) @@ -1085,8 +1133,20 @@ func (m *NodeState) Size() (n int) { if l > 0 { n += 1 + l + sovPrivate(uint64(l)) } - if len(m.DBs) > 0 { - for _, e := range m.DBs { + if len(m.Indexes) > 0 { + for _, e := range m.Indexes { + l = e.Size() + n += 1 + l + sovPrivate(uint64(l)) + } + } + return n +} + +func (m *ClusterStatus) Size() (n int) { + var l int + _ = l + if len(m.Nodes) > 0 { + for _, e := range m.Nodes { l = e.Size() n += 1 + l + sovPrivate(uint64(l)) } @@ -1107,7 +1167,7 @@ func sovPrivate(x uint64) (n int) { func sozPrivate(x uint64) (n int) { return sovPrivate(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *DBMeta) Unmarshal(dAtA []byte) error { +func (m *IndexMeta) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1130,10 +1190,10 @@ func (m *DBMeta) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DBMeta: wiretype end group for non-group") + return fmt.Errorf("proto: IndexMeta: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: 
DBMeta: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IndexMeta: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1501,7 +1561,7 @@ func (m *BlockDataRequest) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DB", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1526,7 +1586,7 @@ func (m *BlockDataRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DB = string(dAtA[iNdEx:postIndex]) + m.Index = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -1714,7 +1774,7 @@ func (m *BlockDataResponse) Unmarshal(dAtA []byte) error { break } } - m.BitmapIDs = append(m.BitmapIDs, v) + m.RowIDs = append(m.RowIDs, v) } } else if wireType == 0 { var v uint64 @@ -1732,9 +1792,9 @@ func (m *BlockDataResponse) Unmarshal(dAtA []byte) error { break } } - m.BitmapIDs = append(m.BitmapIDs, v) + m.RowIDs = append(m.RowIDs, v) } else { - return fmt.Errorf("proto: wrong wireType = %d for field BitmapIDs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RowIDs", wireType) } case 2: if wireType == 2 { @@ -1776,7 +1836,7 @@ func (m *BlockDataResponse) Unmarshal(dAtA []byte) error { break } } - m.ProfileIDs = append(m.ProfileIDs, v) + m.ColumnIDs = append(m.ColumnIDs, v) } } else if wireType == 0 { var v uint64 @@ -1794,9 +1854,9 @@ func (m *BlockDataResponse) Unmarshal(dAtA []byte) error { break } } - m.ProfileIDs = append(m.ProfileIDs, v) + m.ColumnIDs = append(m.ColumnIDs, v) } else { - return fmt.Errorf("proto: wrong wireType = %d for field ProfileIDs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ColumnIDs", wireType) } default: iNdEx = preIndex @@ -1888,7 +1948,7 @@ func (m *Cache) Unmarshal(dAtA []byte) error { break } } - m.BitmapIDs = 
append(m.BitmapIDs, v) + m.IDs = append(m.IDs, v) } } else if wireType == 0 { var v uint64 @@ -1906,9 +1966,9 @@ func (m *Cache) Unmarshal(dAtA []byte) error { break } } - m.BitmapIDs = append(m.BitmapIDs, v) + m.IDs = append(m.IDs, v) } else { - return fmt.Errorf("proto: wrong wireType = %d for field BitmapIDs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IDs", wireType) } default: iNdEx = preIndex @@ -2118,7 +2178,7 @@ func (m *CreateSliceMessage) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DB", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2143,7 +2203,7 @@ func (m *CreateSliceMessage) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DB = string(dAtA[iNdEx:postIndex]) + m.Index = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { @@ -2185,7 +2245,7 @@ func (m *CreateSliceMessage) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeleteDBMessage) Unmarshal(dAtA []byte) error { +func (m *DeleteIndexMessage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2208,15 +2268,15 @@ func (m *DeleteDBMessage) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteDBMessage: wiretype end group for non-group") + return fmt.Errorf("proto: DeleteIndexMessage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteDBMessage: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeleteIndexMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DB", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Index", 
wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2241,7 +2301,7 @@ func (m *DeleteDBMessage) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DB = string(dAtA[iNdEx:postIndex]) + m.Index = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -2264,7 +2324,7 @@ func (m *DeleteDBMessage) Unmarshal(dAtA []byte) error { } return nil } -func (m *CreateDBMessage) Unmarshal(dAtA []byte) error { +func (m *CreateIndexMessage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2287,15 +2347,15 @@ func (m *CreateDBMessage) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CreateDBMessage: wiretype end group for non-group") + return fmt.Errorf("proto: CreateIndexMessage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CreateDBMessage: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CreateIndexMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DB", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2320,7 +2380,7 @@ func (m *CreateDBMessage) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DB = string(dAtA[iNdEx:postIndex]) + m.Index = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -2349,7 +2409,7 @@ func (m *CreateDBMessage) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Meta == nil { - m.Meta = &DBMeta{} + m.Meta = &IndexMeta{} } if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2407,7 +2467,7 @@ func (m *CreateFrameMessage) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field DB", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2432,7 +2492,7 @@ func (m *CreateFrameMessage) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DB = string(dAtA[iNdEx:postIndex]) + m.Index = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -2548,7 +2608,7 @@ func (m *DeleteFrameMessage) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DB", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2573,7 +2633,7 @@ func (m *DeleteFrameMessage) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DB = string(dAtA[iNdEx:postIndex]) + m.Index = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -2737,7 +2797,7 @@ func (m *Frame) Unmarshal(dAtA []byte) error { } return nil } -func (m *DB) Unmarshal(dAtA []byte) error { +func (m *Index) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2760,10 +2820,10 @@ func (m *DB) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DB: wiretype end group for non-group") + return fmt.Errorf("proto: Index: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DB: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Index: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -2822,7 +2882,7 @@ func (m *DB) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Meta == nil { - m.Meta = &DBMeta{} + m.Meta = &IndexMeta{} } if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return 
err @@ -2899,7 +2959,7 @@ func (m *DB) Unmarshal(dAtA []byte) error { } return nil } -func (m *NodeState) Unmarshal(dAtA []byte) error { +func (m *NodeStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2922,10 +2982,10 @@ func (m *NodeState) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NodeState: wiretype end group for non-group") + return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NodeState: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -2988,7 +3048,88 @@ func (m *NodeState) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DBs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Indexes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPrivate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPrivate + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Indexes = append(m.Indexes, &Index{}) + if err := m.Indexes[len(m.Indexes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPrivate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPrivate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 
+ for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPrivate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3012,8 +3153,8 @@ func (m *NodeState) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DBs = append(m.DBs, &DB{}) - if err := m.DBs[len(m.DBs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Nodes = append(m.Nodes, &NodeStatus{}) + if err := m.Nodes[len(m.Nodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3146,43 +3287,44 @@ var ( func init() { proto.RegisterFile("private.proto", fileDescriptorPrivate) } var fileDescriptorPrivate = []byte{ - // 600 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x54, 0xcd, 0x4e, 0x14, 0x41, - 0x10, 0x76, 0x7e, 0x20, 0x4c, 0x21, 0xcb, 0xd2, 0x7a, 0x98, 0x10, 0x32, 0x59, 0x3b, 0x2a, 0xc4, - 0x03, 0x07, 0xbc, 0x18, 0xe2, 0x69, 0x18, 0x14, 0x12, 0x20, 0xd2, 0x8b, 0xde, 0x7b, 0x97, 0x52, - 0x27, 0x3b, 0x7f, 0xce, 0xf4, 0x2e, 0xac, 0x57, 0x5f, 0xc2, 0xc4, 0x67, 0xf0, 0x3d, 0x3c, 0xfa, - 0x08, 0x66, 0x7d, 0x11, 0xd3, 0xdd, 0xf3, 0xe7, 0xb2, 0xf8, 0x73, 0xeb, 0xfa, 0xaa, 0xea, 0xab, - 0xaf, 0xbf, 0xa9, 0x1e, 0x58, 0xcb, 0xf2, 0x70, 0xc2, 0x05, 0xee, 0x66, 0x79, 0x2a, 0x52, 0xb2, - 0x12, 0x26, 0x02, 0xf3, 0x84, 0x47, 0xf4, 0x04, 0x96, 0x03, 0xff, 0x14, 
0x05, 0x27, 0x3d, 0x58, - 0x3d, 0x48, 0xa3, 0x71, 0x9c, 0x9c, 0xf0, 0x01, 0x46, 0xae, 0xd1, 0x33, 0x76, 0x1c, 0xd6, 0x86, - 0x64, 0xc5, 0x45, 0x18, 0xe3, 0xf9, 0x98, 0x27, 0x62, 0x1c, 0xbb, 0xa6, 0xae, 0x68, 0x41, 0xf4, - 0xab, 0x01, 0xce, 0x8b, 0x9c, 0xc7, 0xa8, 0x18, 0x37, 0x61, 0x85, 0xa5, 0x57, 0x6d, 0xba, 0x3a, - 0x26, 0x8f, 0xa1, 0x73, 0x9c, 0x4c, 0x30, 0x2f, 0xf0, 0x30, 0xe1, 0x83, 0x08, 0x2f, 0x15, 0xdd, - 0x0a, 0x9b, 0x43, 0xc9, 0x16, 0x38, 0x07, 0x7c, 0xf8, 0x1e, 0x2f, 0xa6, 0x19, 0xba, 0x96, 0x22, - 0x69, 0x80, 0x3a, 0xdb, 0x0f, 0x3f, 0xa2, 0x6b, 0xf7, 0x8c, 0x9d, 0x35, 0xd6, 0x00, 0xf3, 0x7a, - 0x97, 0x6e, 0xea, 0xa5, 0xd0, 0x39, 0x8e, 0xb3, 0x34, 0x17, 0x0c, 0x8b, 0x2c, 0x4d, 0x0a, 0x24, - 0x5d, 0xb0, 0x0e, 0xf3, 0xbc, 0x94, 0x2b, 0x8f, 0xf4, 0x1a, 0xba, 0x7e, 0x94, 0x0e, 0x47, 0x01, - 0x17, 0x9c, 0xe1, 0x87, 0x31, 0x16, 0x82, 0x74, 0xc0, 0x0c, 0xfc, 0xb2, 0xc8, 0x0c, 0x7c, 0x72, - 0x1f, 0x96, 0xd4, 0xb5, 0x4b, 0x4f, 0x74, 0x20, 0x51, 0xd5, 0xa9, 0x74, 0xdb, 0x4c, 0x07, 0x12, - 0xed, 0x47, 0xe1, 0x50, 0xeb, 0xb5, 0x99, 0x0e, 0x08, 0x01, 0xfb, 0x4d, 0x88, 0x57, 0xa5, 0x48, - 0x75, 0xa6, 0xe7, 0xb0, 0xd1, 0x9a, 0x5c, 0x0a, 0xdc, 0x02, 0xc7, 0x0f, 0x45, 0xcc, 0xb3, 0xe3, - 0xa0, 0x70, 0x8d, 0x9e, 0xb5, 0x63, 0xb3, 0x06, 0x20, 0x1e, 0xc0, 0xab, 0x3c, 0x7d, 0x1b, 0x46, - 0x28, 0xd3, 0xa6, 0x4a, 0xb7, 0x10, 0xfa, 0x08, 0x96, 0x94, 0x3f, 0x7f, 0xa6, 0xa1, 0x5f, 0x0c, - 0xd8, 0x38, 0xe5, 0xd7, 0x4a, 0x5a, 0x51, 0x8f, 0x3e, 0x02, 0xa7, 0x06, 0x55, 0xcf, 0xea, 0xde, - 0x93, 0xdd, 0x6a, 0x93, 0x76, 0x6f, 0xd4, 0x37, 0xc8, 0x61, 0x22, 0xf2, 0x29, 0x6b, 0x9a, 0x37, - 0x9f, 0x43, 0xe7, 0xf7, 0xa4, 0xf4, 0x7d, 0x84, 0xd3, 0xca, 0xf7, 0x11, 0x4e, 0xa5, 0x4f, 0x13, - 0x1e, 0x8d, 0xb5, 0xa7, 0x36, 0xd3, 0xc1, 0xbe, 0xf9, 0xcc, 0xa0, 0xfb, 0x40, 0x0e, 0x72, 0xe4, - 0x02, 0x15, 0xc1, 0x29, 0x16, 0x05, 0x7f, 0x87, 0x8b, 0xbe, 0x89, 0xf6, 0xd9, 0x6c, 0xf9, 0x4c, - 0x1f, 0xc0, 0x7a, 0x80, 0x11, 0x0a, 0x94, 0x5b, 0xbf, 0xb0, 0x91, 0xbe, 0x84, 0x75, 0x4d, 0x7f, - 0x6b, 0x09, 
0x79, 0x08, 0xb6, 0xdc, 0x70, 0x45, 0xbd, 0xba, 0xd7, 0x6d, 0x4c, 0xd0, 0x6f, 0x89, - 0xa9, 0x2c, 0x1d, 0x56, 0x3a, 0xcb, 0x27, 0x71, 0xab, 0xce, 0x05, 0xbb, 0xb3, 0x5d, 0x4e, 0xb0, - 0xd4, 0x84, 0x7b, 0xcd, 0x84, 0xfa, 0x79, 0x95, 0x43, 0xf6, 0x81, 0xe8, 0x0b, 0xfd, 0xff, 0x10, - 0x1a, 0x94, 0xa8, 0xdc, 0xbe, 0x33, 0x99, 0xd5, 0x0d, 0xea, 0x5c, 0x2b, 0x30, 0xff, 0xa6, 0xe0, - 0x93, 0x21, 0x87, 0x2d, 0xe4, 0xf8, 0x27, 0x9f, 0xe4, 0x7f, 0xa2, 0xda, 0x86, 0xf2, 0xa9, 0xd4, - 0x31, 0xd9, 0x86, 0x65, 0x35, 0xaf, 0x70, 0x6d, 0xb5, 0x70, 0xeb, 0x73, 0x3a, 0x58, 0x99, 0xa6, - 0xaf, 0xc1, 0x39, 0x4b, 0x2f, 0xb1, 0x2f, 0xb8, 0x50, 0xf7, 0x39, 0x4a, 0x0b, 0x51, 0x69, 0x91, - 0x67, 0xb5, 0x0f, 0x32, 0x59, 0x59, 0xa0, 0x2b, 0x3d, 0xb0, 0x02, 0xbf, 0x70, 0x2d, 0x45, 0x7e, - 0xb7, 0x2d, 0x90, 0xc9, 0x84, 0xdf, 0xfd, 0x36, 0xf3, 0x8c, 0xef, 0x33, 0xcf, 0xf8, 0x31, 0xf3, - 0x8c, 0xcf, 0x3f, 0xbd, 0x3b, 0x83, 0x65, 0xf5, 0x0b, 0x7d, 0xfa, 0x2b, 0x00, 0x00, 0xff, 0xff, - 0x3a, 0x23, 0x0f, 0xb4, 0x53, 0x05, 0x00, 0x00, + // 617 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x54, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0xc5, 0x89, 0x53, 0x9a, 0xa9, 0x5a, 0xda, 0xa5, 0x42, 0xa6, 0x42, 0x51, 0xb4, 0x07, 0x5a, + 0x7a, 0xe8, 0xa1, 0x5c, 0x10, 0x70, 0xa8, 0x9a, 0x04, 0x35, 0x12, 0x29, 0x62, 0x53, 0x71, 0xdf, + 0x24, 0x23, 0xb0, 0xe2, 0xd8, 0xc1, 0xbb, 0x4e, 0x1a, 0x0e, 0xdc, 0xf9, 0x03, 0x24, 0xbe, 0x81, + 0xff, 0xe0, 0xc8, 0x27, 0xa0, 0xf0, 0x23, 0x68, 0xc7, 0x6b, 0x3b, 0xb8, 0x94, 0x0a, 0x6e, 0x3b, + 0x6f, 0x66, 0xe7, 0xbd, 0x7d, 0x9e, 0x31, 0x6c, 0x4e, 0x63, 0x7f, 0x26, 0x35, 0x1e, 0x4d, 0xe3, + 0x48, 0x47, 0x6c, 0xdd, 0x0f, 0x35, 0xc6, 0xa1, 0x0c, 0xf8, 0x2b, 0xa8, 0x77, 0xc3, 0x11, 0x5e, + 0xf6, 0x50, 0x4b, 0xd6, 0x84, 0x8d, 0x56, 0x14, 0x24, 0x93, 0xf0, 0xa5, 0x1c, 0x60, 0xe0, 0x39, + 0x4d, 0xe7, 0xa0, 0x2e, 0x56, 0x21, 0x53, 0x71, 0xe1, 0x4f, 0xf0, 0x75, 0x22, 0x43, 0x9d, 0x4c, + 0xbc, 0x4a, 0x5a, 0xb1, 0x02, 0xf1, 0xaf, 0x0e, 
0xd4, 0x5f, 0xc4, 0x72, 0x82, 0xd4, 0x71, 0x0f, + 0xd6, 0x45, 0x34, 0x5f, 0x6d, 0x97, 0xc7, 0xec, 0x21, 0x6c, 0x75, 0xc3, 0x19, 0xc6, 0x0a, 0x3b, + 0xa1, 0x1c, 0x04, 0x38, 0xa2, 0x76, 0xeb, 0xa2, 0x84, 0xb2, 0x07, 0x50, 0x6f, 0xc9, 0xe1, 0x3b, + 0xbc, 0x58, 0x4c, 0xd1, 0xab, 0x52, 0x93, 0x02, 0xc8, 0xb3, 0x7d, 0xff, 0x03, 0x7a, 0x6e, 0xd3, + 0x39, 0xd8, 0x14, 0x05, 0x50, 0xd6, 0x5b, 0xbb, 0xaa, 0x97, 0xc3, 0x56, 0x77, 0x32, 0x8d, 0x62, + 0x2d, 0x50, 0x4d, 0xa3, 0x50, 0x21, 0xdb, 0x86, 0x6a, 0x27, 0x8e, 0xad, 0x5c, 0x73, 0xe4, 0x1f, + 0x61, 0xfb, 0x34, 0x88, 0x86, 0xe3, 0xb6, 0xd4, 0x52, 0xe0, 0xfb, 0x04, 0x95, 0x66, 0xbb, 0x50, + 0x23, 0xe3, 0x6c, 0x5d, 0x1a, 0x18, 0x94, 0x1e, 0x6f, 0x9d, 0x49, 0x03, 0x83, 0xd2, 0x7d, 0x52, + 0xef, 0x8a, 0x34, 0x30, 0x68, 0x3f, 0xf0, 0x87, 0xa9, 0x6a, 0x57, 0xa4, 0x01, 0x63, 0xe0, 0xbe, + 0xf1, 0x71, 0x6e, 0xa5, 0xd2, 0x99, 0x77, 0x61, 0x67, 0x85, 0xdf, 0xca, 0xbc, 0x07, 0x6b, 0x22, + 0x9a, 0x77, 0xdb, 0xca, 0x73, 0x9a, 0xd5, 0x03, 0x57, 0xd8, 0x88, 0x0c, 0xa1, 0x2f, 0x66, 0x52, + 0x15, 0x4a, 0x15, 0x00, 0xbf, 0x0f, 0x35, 0x72, 0xc7, 0xbc, 0xb2, 0xb8, 0x6b, 0x8e, 0xfc, 0x8b, + 0x03, 0x3b, 0x3d, 0x79, 0x49, 0x32, 0x54, 0x4e, 0x73, 0x06, 0xf5, 0x1c, 0xa4, 0xea, 0x8d, 0xe3, + 0xc3, 0xa3, 0x6c, 0x7c, 0x8e, 0xae, 0xd4, 0x17, 0x48, 0x27, 0xd4, 0xf1, 0x42, 0x14, 0x97, 0xf7, + 0x9e, 0xc3, 0xd6, 0xef, 0x49, 0xa3, 0x61, 0x8c, 0x8b, 0xcc, 0xe9, 0x31, 0x2e, 0x8c, 0x27, 0x33, + 0x19, 0x24, 0xa9, 0x7f, 0xae, 0x48, 0x83, 0xa7, 0x95, 0x27, 0x0e, 0x3f, 0x01, 0xd6, 0x8a, 0x51, + 0x6a, 0xa4, 0x06, 0x3d, 0x54, 0x4a, 0xbe, 0xc5, 0xeb, 0xbf, 0x42, 0xea, 0x6c, 0x65, 0xc5, 0x59, + 0x7e, 0x08, 0xac, 0x8d, 0x01, 0x6a, 0xb4, 0x03, 0xff, 0x97, 0x0e, 0xbc, 0x9f, 0xb1, 0xdd, 0x5c, + 0xcb, 0xf6, 0xc1, 0x35, 0xb3, 0x4e, 0x64, 0x1b, 0xc7, 0x77, 0x0b, 0x73, 0xf2, 0xc5, 0x12, 0x54, + 0xc0, 0xfd, 0xac, 0xa9, 0xdd, 0x8f, 0x1b, 0x9e, 0xf0, 0x87, 0x41, 0xca, 0xa8, 0xaa, 0x65, 0xaa, + 0x7c, 0xe3, 0x2c, 0xd5, 0x49, 0xf6, 0xd6, 0xff, 0xa5, 0xe2, 0x6d, 0x8b, 0x9a, 0x81, 
0x3c, 0x37, + 0xd9, 0xf4, 0x0e, 0x9d, 0xaf, 0x7f, 0x72, 0x59, 0xc7, 0x27, 0xc7, 0x52, 0xfe, 0x5b, 0x9b, 0x92, + 0x73, 0xe6, 0x37, 0x92, 0x8d, 0x8e, 0xdd, 0xa1, 0x3c, 0x66, 0xfb, 0xb0, 0x46, 0xac, 0xca, 0x73, + 0x69, 0x3a, 0xef, 0x94, 0xd4, 0x08, 0x9b, 0xe6, 0x12, 0xe0, 0x3c, 0x1a, 0x61, 0x5f, 0x4b, 0x9d, + 0x28, 0xa3, 0xe7, 0x2c, 0x52, 0x3a, 0xd3, 0x63, 0xce, 0x34, 0x37, 0x5a, 0xea, 0xdc, 0x09, 0x0a, + 0xd8, 0x23, 0xb8, 0x4d, 0x7a, 0x50, 0x79, 0xd5, 0x32, 0x03, 0x25, 0x44, 0x96, 0xe7, 0xcf, 0x60, + 0xb3, 0x15, 0x24, 0x4a, 0x63, 0x6c, 0x59, 0x0e, 0xa1, 0x66, 0x38, 0xb3, 0xcd, 0xd9, 0x2d, 0x6e, + 0x16, 0x52, 0x44, 0x5a, 0x72, 0xba, 0xfd, 0x6d, 0xd9, 0x70, 0xbe, 0x2f, 0x1b, 0xce, 0x8f, 0x65, + 0xc3, 0xf9, 0xfc, 0xb3, 0x71, 0x6b, 0xb0, 0x46, 0x7f, 0xeb, 0xc7, 0xbf, 0x02, 0x00, 0x00, 0xff, + 0xff, 0x77, 0x02, 0x29, 0xbb, 0xbe, 0x05, 0x00, 0x00, } diff --git a/internal/private.proto b/internal/private.proto index ec060e355..b83683d51 100644 --- a/internal/private.proto +++ b/internal/private.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package internal; -message DBMeta { +message IndexMeta { string ColumnLabel = 1; string TimeQuantum = 2; } @@ -20,7 +20,7 @@ message ImportResponse { } message BlockDataRequest { - string DB = 1; + string Index = 1; string Frame = 2; string View = 5; uint64 Slice = 4; @@ -28,12 +28,12 @@ message BlockDataRequest { } message BlockDataResponse { - repeated uint64 BitmapIDs = 1; - repeated uint64 ProfileIDs = 2; + repeated uint64 RowIDs = 1; + repeated uint64 ColumnIDs = 2; } message Cache { - repeated uint64 BitmapIDs = 1; + repeated uint64 IDs = 1; } message MaxSlicesResponse { @@ -41,27 +41,27 @@ message MaxSlicesResponse { } message CreateSliceMessage { - string DB = 1; + string Index = 1; uint64 Slice = 2; } -message DeleteDBMessage { - string DB = 1; +message DeleteIndexMessage { + string Index = 1; } -message CreateDBMessage { - string DB = 1; - DBMeta Meta = 2; +message CreateIndexMessage { + string Index = 1; + IndexMeta Meta = 2; } 
message CreateFrameMessage { - string DB = 1; + string Index = 1; string Frame = 2; FrameMeta Meta = 3; } message DeleteFrameMessage { - string DB = 1; + string Index = 1; string Frame = 2; } @@ -70,15 +70,19 @@ message Frame { FrameMeta Meta = 2; } -message DB { +message Index { string Name = 1; - DBMeta Meta = 2; + IndexMeta Meta = 2; uint64 MaxSlice = 3; repeated Frame Frames = 4; } -message NodeState { +message NodeStatus { string Host = 1; string State = 2; - repeated DB DBs = 3; + repeated Index Indexes = 3; +} + +message ClusterStatus { + repeated NodeStatus Nodes = 1; } diff --git a/internal/public.pb.go b/internal/public.pb.go index 249bbfddf..9eb005c17 100644 --- a/internal/public.pb.go +++ b/internal/public.pb.go @@ -12,7 +12,7 @@ Bitmap Pair Bit - Profile + ColumnAttrSet Attr AttrMap QueryRequest @@ -67,8 +67,8 @@ func (*Pair) ProtoMessage() {} func (*Pair) Descriptor() ([]byte, []int) { return fileDescriptorPublic, []int{1} } type Bit struct { - BitmapID uint64 `protobuf:"varint,1,opt,name=BitmapID,proto3" json:"BitmapID,omitempty"` - ProfileID uint64 `protobuf:"varint,2,opt,name=ProfileID,proto3" json:"ProfileID,omitempty"` + RowID uint64 `protobuf:"varint,1,opt,name=RowID,proto3" json:"RowID,omitempty"` + ColumnID uint64 `protobuf:"varint,2,opt,name=ColumnID,proto3" json:"ColumnID,omitempty"` Timestamp int64 `protobuf:"varint,3,opt,name=Timestamp,proto3" json:"Timestamp,omitempty"` } @@ -77,17 +77,17 @@ func (m *Bit) String() string { return proto.CompactTextString(m) } func (*Bit) ProtoMessage() {} func (*Bit) Descriptor() ([]byte, []int) { return fileDescriptorPublic, []int{2} } -type Profile struct { +type ColumnAttrSet struct { ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` Attrs []*Attr `protobuf:"bytes,2,rep,name=Attrs" json:"Attrs,omitempty"` } -func (m *Profile) Reset() { *m = Profile{} } -func (m *Profile) String() string { return proto.CompactTextString(m) } -func (*Profile) ProtoMessage() {} -func (*Profile) 
Descriptor() ([]byte, []int) { return fileDescriptorPublic, []int{3} } +func (m *ColumnAttrSet) Reset() { *m = ColumnAttrSet{} } +func (m *ColumnAttrSet) String() string { return proto.CompactTextString(m) } +func (*ColumnAttrSet) ProtoMessage() {} +func (*ColumnAttrSet) Descriptor() ([]byte, []int) { return fileDescriptorPublic, []int{3} } -func (m *Profile) GetAttrs() []*Attr { +func (m *ColumnAttrSet) GetAttrs() []*Attr { if m != nil { return m.Attrs } @@ -125,11 +125,11 @@ func (m *AttrMap) GetAttrs() []*Attr { } type QueryRequest struct { - Query string `protobuf:"bytes,1,opt,name=Query,proto3" json:"Query,omitempty"` - Slices []uint64 `protobuf:"varint,2,rep,packed,name=Slices" json:"Slices,omitempty"` - Profiles bool `protobuf:"varint,3,opt,name=Profiles,proto3" json:"Profiles,omitempty"` - Quantum string `protobuf:"bytes,4,opt,name=Quantum,proto3" json:"Quantum,omitempty"` - Remote bool `protobuf:"varint,5,opt,name=Remote,proto3" json:"Remote,omitempty"` + Query string `protobuf:"bytes,1,opt,name=Query,proto3" json:"Query,omitempty"` + Slices []uint64 `protobuf:"varint,2,rep,packed,name=Slices" json:"Slices,omitempty"` + ColumnAttrs bool `protobuf:"varint,3,opt,name=ColumnAttrs,proto3" json:"ColumnAttrs,omitempty"` + Quantum string `protobuf:"bytes,4,opt,name=Quantum,proto3" json:"Quantum,omitempty"` + Remote bool `protobuf:"varint,5,opt,name=Remote,proto3" json:"Remote,omitempty"` } func (m *QueryRequest) Reset() { *m = QueryRequest{} } @@ -138,9 +138,9 @@ func (*QueryRequest) ProtoMessage() {} func (*QueryRequest) Descriptor() ([]byte, []int) { return fileDescriptorPublic, []int{6} } type QueryResponse struct { - Err string `protobuf:"bytes,1,opt,name=Err,proto3" json:"Err,omitempty"` - Results []*QueryResult `protobuf:"bytes,2,rep,name=Results" json:"Results,omitempty"` - Profiles []*Profile `protobuf:"bytes,3,rep,name=Profiles" json:"Profiles,omitempty"` + Err string `protobuf:"bytes,1,opt,name=Err,proto3" json:"Err,omitempty"` + Results []*QueryResult 
`protobuf:"bytes,2,rep,name=Results" json:"Results,omitempty"` + ColumnAttrSets []*ColumnAttrSet `protobuf:"bytes,3,rep,name=ColumnAttrSets" json:"ColumnAttrSets,omitempty"` } func (m *QueryResponse) Reset() { *m = QueryResponse{} } @@ -155,9 +155,9 @@ func (m *QueryResponse) GetResults() []*QueryResult { return nil } -func (m *QueryResponse) GetProfiles() []*Profile { +func (m *QueryResponse) GetColumnAttrSets() []*ColumnAttrSet { if m != nil { - return m.Profiles + return m.ColumnAttrSets } return nil } @@ -189,11 +189,11 @@ func (m *QueryResult) GetPairs() []*Pair { } type ImportRequest struct { - DB string `protobuf:"bytes,1,opt,name=DB,proto3" json:"DB,omitempty"` + Index string `protobuf:"bytes,1,opt,name=Index,proto3" json:"Index,omitempty"` Frame string `protobuf:"bytes,2,opt,name=Frame,proto3" json:"Frame,omitempty"` Slice uint64 `protobuf:"varint,3,opt,name=Slice,proto3" json:"Slice,omitempty"` - BitmapIDs []uint64 `protobuf:"varint,4,rep,packed,name=BitmapIDs" json:"BitmapIDs,omitempty"` - ProfileIDs []uint64 `protobuf:"varint,5,rep,packed,name=ProfileIDs" json:"ProfileIDs,omitempty"` + RowIDs []uint64 `protobuf:"varint,4,rep,packed,name=RowIDs" json:"RowIDs,omitempty"` + ColumnIDs []uint64 `protobuf:"varint,5,rep,packed,name=ColumnIDs" json:"ColumnIDs,omitempty"` Timestamps []int64 `protobuf:"varint,6,rep,packed,name=Timestamps" json:"Timestamps,omitempty"` } @@ -206,7 +206,7 @@ func init() { proto.RegisterType((*Bitmap)(nil), "internal.Bitmap") proto.RegisterType((*Pair)(nil), "internal.Pair") proto.RegisterType((*Bit)(nil), "internal.Bit") - proto.RegisterType((*Profile)(nil), "internal.Profile") + proto.RegisterType((*ColumnAttrSet)(nil), "internal.ColumnAttrSet") proto.RegisterType((*Attr)(nil), "internal.Attr") proto.RegisterType((*AttrMap)(nil), "internal.AttrMap") proto.RegisterType((*QueryRequest)(nil), "internal.QueryRequest") @@ -304,15 +304,15 @@ func (m *Bit) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.BitmapID != 0 { 
+ if m.RowID != 0 { dAtA[i] = 0x8 i++ - i = encodeVarintPublic(dAtA, i, uint64(m.BitmapID)) + i = encodeVarintPublic(dAtA, i, uint64(m.RowID)) } - if m.ProfileID != 0 { + if m.ColumnID != 0 { dAtA[i] = 0x10 i++ - i = encodeVarintPublic(dAtA, i, uint64(m.ProfileID)) + i = encodeVarintPublic(dAtA, i, uint64(m.ColumnID)) } if m.Timestamp != 0 { dAtA[i] = 0x18 @@ -322,7 +322,7 @@ func (m *Bit) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *Profile) Marshal() (dAtA []byte, err error) { +func (m *ColumnAttrSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -332,7 +332,7 @@ func (m *Profile) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Profile) MarshalTo(dAtA []byte) (int, error) { +func (m *ColumnAttrSet) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -480,10 +480,10 @@ func (m *QueryRequest) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintPublic(dAtA, i, uint64(j3)) i += copy(dAtA[i:], dAtA4[:j3]) } - if m.Profiles { + if m.ColumnAttrs { dAtA[i] = 0x18 i++ - if m.Profiles { + if m.ColumnAttrs { dAtA[i] = 1 } else { dAtA[i] = 0 @@ -542,8 +542,8 @@ func (m *QueryResponse) MarshalTo(dAtA []byte) (int, error) { i += n } } - if len(m.Profiles) > 0 { - for _, msg := range m.Profiles { + if len(m.ColumnAttrSets) > 0 { + for _, msg := range m.ColumnAttrSets { dAtA[i] = 0x1a i++ i = encodeVarintPublic(dAtA, i, uint64(msg.Size())) @@ -627,11 +627,11 @@ func (m *ImportRequest) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.DB) > 0 { + if len(m.Index) > 0 { dAtA[i] = 0xa i++ - i = encodeVarintPublic(dAtA, i, uint64(len(m.DB))) - i += copy(dAtA[i:], m.DB) + i = encodeVarintPublic(dAtA, i, uint64(len(m.Index))) + i += copy(dAtA[i:], m.Index) } if len(m.Frame) > 0 { dAtA[i] = 0x12 @@ -644,10 +644,10 @@ func (m *ImportRequest) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintPublic(dAtA, i, uint64(m.Slice)) } - if 
len(m.BitmapIDs) > 0 { - dAtA7 := make([]byte, len(m.BitmapIDs)*10) + if len(m.RowIDs) > 0 { + dAtA7 := make([]byte, len(m.RowIDs)*10) var j6 int - for _, num := range m.BitmapIDs { + for _, num := range m.RowIDs { for num >= 1<<7 { dAtA7[j6] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 @@ -661,10 +661,10 @@ func (m *ImportRequest) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintPublic(dAtA, i, uint64(j6)) i += copy(dAtA[i:], dAtA7[:j6]) } - if len(m.ProfileIDs) > 0 { - dAtA9 := make([]byte, len(m.ProfileIDs)*10) + if len(m.ColumnIDs) > 0 { + dAtA9 := make([]byte, len(m.ColumnIDs)*10) var j8 int - for _, num := range m.ProfileIDs { + for _, num := range m.ColumnIDs { for num >= 1<<7 { dAtA9[j8] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 @@ -760,11 +760,11 @@ func (m *Pair) Size() (n int) { func (m *Bit) Size() (n int) { var l int _ = l - if m.BitmapID != 0 { - n += 1 + sovPublic(uint64(m.BitmapID)) + if m.RowID != 0 { + n += 1 + sovPublic(uint64(m.RowID)) } - if m.ProfileID != 0 { - n += 1 + sovPublic(uint64(m.ProfileID)) + if m.ColumnID != 0 { + n += 1 + sovPublic(uint64(m.ColumnID)) } if m.Timestamp != 0 { n += 1 + sovPublic(uint64(m.Timestamp)) @@ -772,7 +772,7 @@ func (m *Bit) Size() (n int) { return n } -func (m *Profile) Size() (n int) { +func (m *ColumnAttrSet) Size() (n int) { var l int _ = l if m.ID != 0 { @@ -839,7 +839,7 @@ func (m *QueryRequest) Size() (n int) { } n += 1 + sovPublic(uint64(l)) + l } - if m.Profiles { + if m.ColumnAttrs { n += 2 } l = len(m.Quantum) @@ -865,8 +865,8 @@ func (m *QueryResponse) Size() (n int) { n += 1 + l + sovPublic(uint64(l)) } } - if len(m.Profiles) > 0 { - for _, e := range m.Profiles { + if len(m.ColumnAttrSets) > 0 { + for _, e := range m.ColumnAttrSets { l = e.Size() n += 1 + l + sovPublic(uint64(l)) } @@ -899,7 +899,7 @@ func (m *QueryResult) Size() (n int) { func (m *ImportRequest) Size() (n int) { var l int _ = l - l = len(m.DB) + l = len(m.Index) if l > 0 { n += 1 + l + sovPublic(uint64(l)) } @@ -910,16 
+910,16 @@ func (m *ImportRequest) Size() (n int) { if m.Slice != 0 { n += 1 + sovPublic(uint64(m.Slice)) } - if len(m.BitmapIDs) > 0 { + if len(m.RowIDs) > 0 { l = 0 - for _, e := range m.BitmapIDs { + for _, e := range m.RowIDs { l += sovPublic(uint64(e)) } n += 1 + sovPublic(uint64(l)) + l } - if len(m.ProfileIDs) > 0 { + if len(m.ColumnIDs) > 0 { l = 0 - for _, e := range m.ProfileIDs { + for _, e := range m.ColumnIDs { l += sovPublic(uint64(e)) } n += 1 + sovPublic(uint64(l)) + l @@ -1209,9 +1209,9 @@ func (m *Bit) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BitmapID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RowID", wireType) } - m.BitmapID = 0 + m.RowID = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPublic @@ -1221,16 +1221,16 @@ func (m *Bit) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.BitmapID |= (uint64(b) & 0x7F) << shift + m.RowID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProfileID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ColumnID", wireType) } - m.ProfileID = 0 + m.ColumnID = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPublic @@ -1240,7 +1240,7 @@ func (m *Bit) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ProfileID |= (uint64(b) & 0x7F) << shift + m.ColumnID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1285,7 +1285,7 @@ func (m *Bit) Unmarshal(dAtA []byte) error { } return nil } -func (m *Profile) Unmarshal(dAtA []byte) error { +func (m *ColumnAttrSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1308,10 +1308,10 @@ func (m *Profile) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
Profile: wiretype end group for non-group") + return fmt.Errorf("proto: ColumnAttrSet: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Profile: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ColumnAttrSet: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1772,7 +1772,7 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { } case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ColumnAttrs", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -1789,7 +1789,7 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { break } } - m.Profiles = bool(v != 0) + m.ColumnAttrs = bool(v != 0) case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Quantum", wireType) @@ -1951,7 +1951,7 @@ func (m *QueryResponse) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ColumnAttrSets", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1975,8 +1975,8 @@ func (m *QueryResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Profiles = append(m.Profiles, &Profile{}) - if err := m.Profiles[len(m.Profiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ColumnAttrSets = append(m.ColumnAttrSets, &ColumnAttrSet{}) + if err := m.ColumnAttrSets[len(m.ColumnAttrSets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2185,7 +2185,7 @@ func (m *ImportRequest) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DB", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) } var stringLen uint64 
for shift := uint(0); ; shift += 7 { @@ -2210,7 +2210,7 @@ func (m *ImportRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DB = string(dAtA[iNdEx:postIndex]) + m.Index = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -2300,7 +2300,7 @@ func (m *ImportRequest) Unmarshal(dAtA []byte) error { break } } - m.BitmapIDs = append(m.BitmapIDs, v) + m.RowIDs = append(m.RowIDs, v) } } else if wireType == 0 { var v uint64 @@ -2318,9 +2318,9 @@ func (m *ImportRequest) Unmarshal(dAtA []byte) error { break } } - m.BitmapIDs = append(m.BitmapIDs, v) + m.RowIDs = append(m.RowIDs, v) } else { - return fmt.Errorf("proto: wrong wireType = %d for field BitmapIDs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RowIDs", wireType) } case 5: if wireType == 2 { @@ -2362,7 +2362,7 @@ func (m *ImportRequest) Unmarshal(dAtA []byte) error { break } } - m.ProfileIDs = append(m.ProfileIDs, v) + m.ColumnIDs = append(m.ColumnIDs, v) } } else if wireType == 0 { var v uint64 @@ -2380,9 +2380,9 @@ func (m *ImportRequest) Unmarshal(dAtA []byte) error { break } } - m.ProfileIDs = append(m.ProfileIDs, v) + m.ColumnIDs = append(m.ColumnIDs, v) } else { - return fmt.Errorf("proto: wrong wireType = %d for field ProfileIDs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ColumnIDs", wireType) } case 6: if wireType == 2 { @@ -2575,41 +2575,41 @@ var ( func init() { proto.RegisterFile("public.proto", fileDescriptorPublic) } var fileDescriptorPublic = []byte{ - // 570 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x54, 0x4b, 0x6e, 0xd4, 0x40, - 0x10, 0xa5, 0x6d, 0xcf, 0xaf, 0x26, 0x19, 0x0d, 0x2d, 0x40, 0x16, 0x42, 0x23, 0xcb, 0x62, 0xe1, - 0x0d, 0x13, 0x29, 0x1c, 0x00, 0xe1, 0x4c, 0x22, 0x8d, 0x10, 0x51, 0xd2, 0x89, 0xd8, 0xb1, 0x70, - 0x42, 0x13, 0x2c, 0xf9, 0x47, 0x77, 0x7b, 0x31, 0x4b, 0x16, 0x6c, 0x38, 0x01, 0x47, 0x80, 0x9b, - 
0xb0, 0xe4, 0x08, 0x68, 0xb8, 0x08, 0xaa, 0xfe, 0xd8, 0x66, 0x83, 0xd8, 0xf5, 0x7b, 0xe5, 0xea, - 0xae, 0xf7, 0xaa, 0xca, 0x70, 0xd0, 0xb4, 0x37, 0x45, 0x7e, 0xbb, 0x6e, 0x44, 0xad, 0x6a, 0x3a, - 0xcd, 0x2b, 0xc5, 0x45, 0x95, 0x15, 0x71, 0x0a, 0xe3, 0x34, 0x57, 0x65, 0xd6, 0x50, 0x0a, 0x41, - 0x9a, 0x2b, 0x19, 0x92, 0xc8, 0x4f, 0x02, 0xa6, 0xcf, 0xf4, 0x29, 0x8c, 0x5e, 0x2a, 0x25, 0x64, - 0xe8, 0x45, 0x7e, 0x32, 0x3f, 0x5e, 0xac, 0x5d, 0xde, 0x1a, 0x69, 0x66, 0x82, 0xf1, 0x1a, 0x82, - 0x8b, 0x2c, 0x17, 0x74, 0x09, 0xfe, 0x2b, 0xbe, 0x0b, 0x49, 0x44, 0x92, 0x80, 0xe1, 0x91, 0x3e, - 0x80, 0xd1, 0x49, 0xdd, 0x56, 0x2a, 0xf4, 0x34, 0x67, 0x40, 0xfc, 0x16, 0xfc, 0x34, 0x57, 0xf4, - 0x31, 0x4c, 0xcd, 0xd3, 0xdb, 0x8d, 0xcd, 0xe9, 0x30, 0x7d, 0x02, 0xb3, 0x0b, 0x51, 0xbf, 0xcf, - 0x0b, 0xbe, 0xdd, 0xd8, 0xe4, 0x9e, 0xc0, 0xe8, 0x75, 0x5e, 0x72, 0xa9, 0xb2, 0xb2, 0x09, 0xfd, - 0x88, 0x24, 0x3e, 0xeb, 0x89, 0xf8, 0x05, 0x4c, 0xec, 0xa7, 0x74, 0x01, 0x5e, 0x77, 0xb9, 0xb7, - 0xdd, 0xfc, 0xa7, 0x9e, 0x6f, 0x04, 0x02, 0x3c, 0x0d, 0x05, 0xcd, 0x8c, 0x20, 0x0a, 0xc1, 0xf5, - 0xae, 0xe1, 0xb6, 0x24, 0x7d, 0xa6, 0x11, 0xcc, 0xaf, 0x94, 0xc8, 0xab, 0xbb, 0x37, 0x59, 0xd1, - 0x72, 0x5d, 0xcf, 0x8c, 0x0d, 0x29, 0x54, 0xba, 0xad, 0x94, 0x09, 0x07, 0xba, 0xdc, 0x0e, 0xa3, - 0x96, 0xb4, 0xae, 0x0b, 0x13, 0x1c, 0x45, 0x24, 0x99, 0xb2, 0x9e, 0xa0, 0x2b, 0x80, 0xb3, 0xa2, - 0xce, 0x6c, 0xee, 0x38, 0x22, 0x09, 0x61, 0x03, 0x26, 0x3e, 0x82, 0x09, 0x56, 0xfa, 0x3a, 0x6b, - 0x7a, 0x6d, 0xe4, 0x5f, 0xda, 0xbe, 0x10, 0x38, 0xb8, 0x6c, 0xb9, 0xd8, 0x31, 0xfe, 0xb1, 0xe5, - 0x52, 0x61, 0x8b, 0x34, 0xb6, 0x2a, 0x0d, 0xa0, 0x8f, 0x60, 0x7c, 0x55, 0xe4, 0xb7, 0xdc, 0x38, - 0x15, 0x30, 0x8b, 0x50, 0x89, 0xf5, 0x56, 0x6a, 0xa1, 0x53, 0xd6, 0x61, 0x1a, 0xc2, 0xe4, 0xb2, - 0xcd, 0x2a, 0xd5, 0x96, 0x5a, 0xe4, 0x8c, 0x39, 0x88, 0xb7, 0x31, 0x5e, 0xd6, 0xca, 0x09, 0xb4, - 0x28, 0xfe, 0x44, 0xe0, 0xd0, 0x16, 0x23, 0x9b, 0xba, 0x92, 0x1c, 0x1d, 0x3f, 0x15, 0xc2, 0x39, - 0x7e, 0x2a, 0x04, 0x3d, 0x82, 0x09, 
0xe3, 0xb2, 0x2d, 0x94, 0x6b, 0xda, 0xc3, 0x5e, 0x98, 0xcb, - 0x6d, 0x0b, 0xc5, 0xdc, 0x57, 0xf4, 0xd9, 0x5f, 0x25, 0x62, 0xc6, 0xfd, 0x3e, 0xc3, 0x46, 0xfa, - 0xaa, 0xe3, 0xcf, 0x04, 0xe6, 0x83, 0x7b, 0x68, 0xe2, 0x16, 0x42, 0x17, 0x31, 0x3f, 0x5e, 0xf6, - 0xc9, 0x86, 0x67, 0x6e, 0x61, 0x0e, 0x80, 0x9c, 0xdb, 0x41, 0x20, 0xe7, 0x68, 0x3f, 0x2e, 0x81, - 0x7b, 0x73, 0x60, 0x3f, 0xd2, 0xcc, 0x04, 0xd1, 0xa3, 0x93, 0x0f, 0x59, 0x75, 0xc7, 0xdf, 0x69, - 0x8f, 0xa6, 0xcc, 0xc1, 0xf8, 0x3b, 0x81, 0xc3, 0x6d, 0xd9, 0xd4, 0x42, 0xb9, 0xce, 0x2c, 0xc0, - 0xdb, 0xa4, 0xd6, 0x0a, 0x6f, 0x93, 0x62, 0xa7, 0xce, 0x44, 0x56, 0x9a, 0xe1, 0x9b, 0x31, 0x03, - 0x90, 0xd5, 0xbd, 0xd1, 0xed, 0x08, 0x98, 0x01, 0x7a, 0xaa, 0xec, 0x2e, 0xc9, 0x30, 0xd0, 0x2d, - 0xec, 0x09, 0x9c, 0xaa, 0x6e, 0x99, 0x64, 0x38, 0xd2, 0xe1, 0x01, 0x83, 0xf1, 0x6e, 0x9d, 0x64, - 0x38, 0x8e, 0xfc, 0xc4, 0x67, 0x03, 0x26, 0x5d, 0xfe, 0xd8, 0xaf, 0xc8, 0xcf, 0xfd, 0x8a, 0xfc, - 0xda, 0xaf, 0xc8, 0xd7, 0xdf, 0xab, 0x7b, 0x37, 0x63, 0xfd, 0x5f, 0x79, 0xfe, 0x27, 0x00, 0x00, - 0xff, 0xff, 0x37, 0xb6, 0x15, 0x22, 0x67, 0x04, 0x00, 0x00, + // 576 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x54, 0x4b, 0x8e, 0xd3, 0x40, + 0x10, 0xa5, 0x63, 0xe7, 0x57, 0xf9, 0x28, 0x6a, 0xf1, 0xb1, 0x10, 0x8a, 0x2c, 0x8b, 0x85, 0x57, + 0x19, 0x69, 0x38, 0x00, 0xc2, 0x49, 0x46, 0xb2, 0x10, 0x23, 0xa6, 0x33, 0xb0, 0xf7, 0xcc, 0xb4, + 0x06, 0x4b, 0xfe, 0xd1, 0xdd, 0x16, 0xe4, 0x00, 0xec, 0x91, 0xd8, 0x70, 0x03, 0x38, 0x0a, 0x4b, + 0x8e, 0x80, 0xc2, 0x45, 0x50, 0x75, 0xbb, 0x63, 0x0f, 0x0b, 0xc4, 0xae, 0xdf, 0xab, 0xae, 0x76, + 0xbd, 0x7a, 0x55, 0x86, 0x69, 0x55, 0x5f, 0x65, 0xe9, 0xf5, 0xaa, 0x12, 0xa5, 0x2a, 0xe9, 0x28, + 0x2d, 0x14, 0x17, 0x45, 0x92, 0x05, 0x11, 0x0c, 0xa2, 0x54, 0xe5, 0x49, 0x45, 0x29, 0xb8, 0x51, + 0xaa, 0xa4, 0x47, 0x7c, 0x27, 0x74, 0x99, 0x3e, 0xd3, 0xa7, 0xd0, 0x7f, 0xa1, 0x94, 0x90, 0x5e, + 0xcf, 0x77, 0xc2, 0xc9, 0xe9, 0x7c, 0x65, 0xf3, 0x56, 0x48, 
0x33, 0x13, 0x0c, 0x56, 0xe0, 0xbe, + 0x4e, 0x52, 0x41, 0x17, 0xe0, 0xbc, 0xe4, 0x7b, 0x8f, 0xf8, 0x24, 0x74, 0x19, 0x1e, 0xe9, 0x7d, + 0xe8, 0xaf, 0xcb, 0xba, 0x50, 0x5e, 0x4f, 0x73, 0x06, 0x04, 0x6f, 0xc0, 0x89, 0x52, 0x85, 0x41, + 0x56, 0x7e, 0x88, 0x37, 0x4d, 0x82, 0x01, 0xf4, 0x31, 0x8c, 0xd6, 0x65, 0x56, 0xe7, 0x45, 0xbc, + 0x69, 0xb2, 0x8e, 0x98, 0x3e, 0x81, 0xf1, 0x65, 0x9a, 0x73, 0xa9, 0x92, 0xbc, 0xf2, 0x1c, 0x9f, + 0x84, 0x0e, 0x6b, 0x89, 0x60, 0x0b, 0x33, 0x73, 0x13, 0xab, 0xda, 0x71, 0x45, 0xe7, 0xd0, 0x3b, + 0xbe, 0xde, 0x8b, 0x37, 0xff, 0xa9, 0xe6, 0x3b, 0x01, 0x17, 0x4f, 0x5d, 0x39, 0x63, 0x23, 0x87, + 0x82, 0x7b, 0xb9, 0xaf, 0x78, 0x53, 0x97, 0x3e, 0x53, 0x1f, 0x26, 0x3b, 0x25, 0xd2, 0xe2, 0xf6, + 0x6d, 0x92, 0xd5, 0x5c, 0x57, 0x35, 0x66, 0x5d, 0x0a, 0x15, 0xc5, 0x85, 0x32, 0x61, 0x57, 0x17, + 0x7d, 0xc4, 0xa8, 0x28, 0x2a, 0xcb, 0xcc, 0x04, 0xfb, 0x3e, 0x09, 0x47, 0xac, 0x25, 0xe8, 0x12, + 0xe0, 0x2c, 0x2b, 0x93, 0x26, 0x77, 0xe0, 0x93, 0x90, 0xb0, 0x0e, 0x13, 0x9c, 0xc0, 0x10, 0x2b, + 0x7d, 0x95, 0x54, 0xad, 0x36, 0xf2, 0x2f, 0x6d, 0x9f, 0x09, 0x4c, 0x2f, 0x6a, 0x2e, 0xf6, 0x8c, + 0xbf, 0xaf, 0xb9, 0xd4, 0x1e, 0x68, 0xdc, 0xa8, 0x34, 0x80, 0x3e, 0x84, 0xc1, 0x2e, 0x4b, 0xaf, + 0xb9, 0xe9, 0x94, 0xcb, 0x1a, 0x84, 0x5a, 0xdb, 0x0e, 0x4b, 0xad, 0x75, 0xc4, 0xba, 0x14, 0xf5, + 0x60, 0x78, 0x51, 0x27, 0x85, 0xaa, 0x73, 0x2d, 0x75, 0xcc, 0x2c, 0xc4, 0x37, 0x19, 0xcf, 0x4b, + 0x65, 0x65, 0x36, 0x28, 0xf8, 0x42, 0x60, 0xd6, 0x94, 0x24, 0xab, 0xb2, 0x90, 0x1c, 0xfb, 0xbe, + 0x15, 0xc2, 0xf6, 0x7d, 0x2b, 0x04, 0x3d, 0x81, 0x21, 0xe3, 0xb2, 0xce, 0x94, 0xb5, 0xee, 0x41, + 0x2b, 0xcf, 0xe6, 0xd6, 0x99, 0x62, 0xf6, 0x16, 0x7d, 0x0e, 0xf3, 0x3b, 0xa3, 0x80, 0xb5, 0x62, + 0xde, 0xa3, 0x36, 0xef, 0x4e, 0x9c, 0xfd, 0x75, 0x3d, 0xf8, 0x44, 0x60, 0xd2, 0x79, 0x99, 0x86, + 0x76, 0x4d, 0x74, 0x59, 0x93, 0xd3, 0x45, 0xfb, 0x90, 0xe1, 0x99, 0x5d, 0xa3, 0x29, 0x90, 0xf3, + 0x66, 0x40, 0xc8, 0x39, 0xda, 0x82, 0xab, 0x61, 0xbf, 0xdf, 0xb1, 0x05, 0x69, 0x66, 0x82, 0xd8, + 
0xb5, 0xf5, 0xbb, 0xa4, 0xb8, 0xe5, 0x37, 0xba, 0x6b, 0x23, 0x66, 0x61, 0xf0, 0x8d, 0xc0, 0x2c, + 0xce, 0xab, 0x52, 0xa8, 0x8e, 0x63, 0x71, 0x71, 0xc3, 0x3f, 0x5a, 0xc7, 0x34, 0x40, 0xf6, 0x4c, + 0x24, 0xb9, 0x19, 0xcd, 0x31, 0x33, 0x00, 0x59, 0xed, 0x9c, 0x76, 0xca, 0x65, 0x06, 0x68, 0x27, + 0x70, 0xd5, 0xa4, 0xe7, 0x1a, 0x77, 0x0d, 0xc2, 0x59, 0xb4, 0x9b, 0x26, 0xbd, 0xbe, 0x0e, 0xb5, + 0x04, 0xce, 0xe2, 0x71, 0xd5, 0xa4, 0x37, 0xf0, 0x9d, 0xd0, 0x61, 0x1d, 0x26, 0x5a, 0xfc, 0x38, + 0x2c, 0xc9, 0xcf, 0xc3, 0x92, 0xfc, 0x3a, 0x2c, 0xc9, 0xd7, 0xdf, 0xcb, 0x7b, 0x57, 0x03, 0xfd, + 0xaf, 0x79, 0xf6, 0x27, 0x00, 0x00, 0xff, 0xff, 0x48, 0x20, 0x4b, 0xe7, 0x7b, 0x04, 0x00, 0x00, } diff --git a/internal/public.proto b/internal/public.proto index 03cfdaabd..571fe0364 100644 --- a/internal/public.proto +++ b/internal/public.proto @@ -13,12 +13,12 @@ message Pair { } message Bit { - uint64 BitmapID = 1; - uint64 ProfileID = 2; + uint64 RowID = 1; + uint64 ColumnID = 2; int64 Timestamp = 3; } -message Profile { +message ColumnAttrSet { uint64 ID = 1; repeated Attr Attrs = 2; } @@ -39,7 +39,7 @@ message AttrMap { message QueryRequest { string Query = 1; repeated uint64 Slices = 2; - bool Profiles = 3; + bool ColumnAttrs = 3; string Quantum = 4; bool Remote = 5; } @@ -47,7 +47,7 @@ message QueryRequest { message QueryResponse { string Err = 1; repeated QueryResult Results = 2; - repeated Profile Profiles = 3; + repeated ColumnAttrSet ColumnAttrSets = 3; } message QueryResult { @@ -58,10 +58,10 @@ message QueryResult { } message ImportRequest { - string DB = 1; + string Index = 1; string Frame = 2; uint64 Slice = 3; - repeated uint64 BitmapIDs = 4; - repeated uint64 ProfileIDs = 5; + repeated uint64 RowIDs = 4; + repeated uint64 ColumnIDs = 5; repeated int64 Timestamps = 6; } diff --git a/iterator.go b/iterator.go index 282cb32ed..155914491 100644 --- a/iterator.go +++ b/iterator.go @@ -6,19 +6,19 @@ import ( "github.com/pilosa/pilosa/roaring" ) -// Iterator is an interface 
for looping over bitmap/profile pairs. +// Iterator is an interface for looping over row/column pairs. type Iterator interface { - Seek(bitmapID, profileID uint64) - Next() (bitmapID, profileID uint64, eof bool) + Seek(rowID, columnID uint64) + Next() (rowID, columnID uint64, eof bool) } // BufIterator wraps an iterator to provide the ability to unread values. type BufIterator struct { buf struct { - bitmapID uint64 - profileID uint64 - eof bool - full bool + rowID uint64 + columnID uint64 + eof bool + full bool } itr Iterator } @@ -29,28 +29,28 @@ func NewBufIterator(itr Iterator) *BufIterator { } // Seek moves to the first pair equal to or greater than pseek/bseek. -func (itr *BufIterator) Seek(bitmapID, profileID uint64) { +func (itr *BufIterator) Seek(rowID, columnID uint64) { itr.buf.full = false - itr.itr.Seek(bitmapID, profileID) + itr.itr.Seek(rowID, columnID) } -// Next returns the next pair in the bitmap. +// Next returns the next pair in the row. // If a value has been buffered then it is returned and the buffer is cleared. -func (itr *BufIterator) Next() (bitmapID, profileID uint64, eof bool) { +func (itr *BufIterator) Next() (rowID, columnID uint64, eof bool) { if itr.buf.full { itr.buf.full = false - return itr.buf.bitmapID, itr.buf.profileID, itr.buf.eof + return itr.buf.rowID, itr.buf.columnID, itr.buf.eof } // Read values onto buffer in case of unread. - itr.buf.bitmapID, itr.buf.profileID, itr.buf.eof = itr.itr.Next() + itr.buf.rowID, itr.buf.columnID, itr.buf.eof = itr.itr.Next() - return itr.buf.bitmapID, itr.buf.profileID, itr.buf.eof + return itr.buf.rowID, itr.buf.columnID, itr.buf.eof } // Peek reads the next value but leaves it on the buffer. 
-func (itr *BufIterator) Peek() (bitmapID, profileID uint64, eof bool) { - bitmapID, profileID, eof = itr.Next() +func (itr *BufIterator) Peek() (rowID, columnID uint64, eof bool) { + rowID, columnID, eof = itr.Next() itr.Unread() return } @@ -64,30 +64,30 @@ func (itr *BufIterator) Unread() { itr.buf.full = true } -// LimitIterator wraps an Iterator and limits it to a max profile/bitmap pair. +// LimitIterator wraps an Iterator and limits it to a max column/row pair. type LimitIterator struct { - itr Iterator - maxBitmapID uint64 - maxProfileID uint64 + itr Iterator + maxRowID uint64 + maxColumnID uint64 eof bool } // NewLimitIterator returns a new LimitIterator. -func NewLimitIterator(itr Iterator, maxBitmapID, maxProfileID uint64) *LimitIterator { +func NewLimitIterator(itr Iterator, maxRowID, maxColumnID uint64) *LimitIterator { return &LimitIterator{ - itr: itr, - maxBitmapID: maxBitmapID, - maxProfileID: maxProfileID, + itr: itr, + maxRowID: maxRowID, + maxColumnID: maxColumnID, } } -// Seek moves the underlying iterator to a profile/bitmap pair. -func (itr *LimitIterator) Seek(bitmapID, profileID uint64) { itr.itr.Seek(bitmapID, profileID) } +// Seek moves the underlying iterator to a column/row pair. +func (itr *LimitIterator) Seek(rowID, columnID uint64) { itr.itr.Seek(rowID, columnID) } -// Next returns the next bitmap/profile ID pair. +// Next returns the next row/column ID pair. // If the underlying iterator returns a pair higher than the max then EOF is returned. -func (itr *LimitIterator) Next() (bitmapID, profileID uint64, eof bool) { +func (itr *LimitIterator) Next() (rowID, columnID uint64, eof bool) { // Always return EOF once it is reached by limit or the underlying iterator. if itr.eof { return 0, 0, true @@ -95,35 +95,35 @@ func (itr *LimitIterator) Next() (bitmapID, profileID uint64, eof bool) { // Retrieve pair from underlying iterator. // Mark as EOF if it is beyond the limit (or at EOF). 
- bitmapID, profileID, eof = itr.itr.Next() - if eof || bitmapID > itr.maxBitmapID || (bitmapID == itr.maxBitmapID && profileID > itr.maxProfileID) { + rowID, columnID, eof = itr.itr.Next() + if eof || rowID > itr.maxRowID || (rowID == itr.maxRowID && columnID > itr.maxColumnID) { itr.eof = true return 0, 0, true } - return bitmapID, profileID, false + return rowID, columnID, false } -// SliceIterator iterates over a pair of bitmap/profile ID slices. +// SliceIterator iterates over a pair of row/column ID slices. type SliceIterator struct { - bitmapIDs []uint64 - profileIDs []uint64 + rowIDs []uint64 + columnIDs []uint64 i, n int } -// NewSliceIterator returns an iterator to iterate over a set of bitmap/profile ID pairs. +// NewSliceIterator returns an iterator to iterate over a set of row/column ID pairs. // Both slices MUST have an equal length. Otherwise the function will panic. -func NewSliceIterator(bitmapIDs, profileIDs []uint64) *SliceIterator { - if len(profileIDs) != len(bitmapIDs) { - panic(fmt.Sprintf("pilosa.SliceIterator: pair length mismatch: %d != %d", len(bitmapIDs), len(profileIDs))) +func NewSliceIterator(rowIDs, columnIDs []uint64) *SliceIterator { + if len(columnIDs) != len(rowIDs) { + panic(fmt.Sprintf("pilosa.SliceIterator: pair length mismatch: %d != %d", len(rowIDs), len(columnIDs))) } return &SliceIterator{ - bitmapIDs: bitmapIDs, - profileIDs: profileIDs, + rowIDs: rowIDs, + columnIDs: columnIDs, - n: len(bitmapIDs), + n: len(rowIDs), } } @@ -131,10 +131,10 @@ func NewSliceIterator(bitmapIDs, profileIDs []uint64) *SliceIterator { // If the pair is not found, the iterator seeks to the next pair. 
func (itr *SliceIterator) Seek(bseek, pseek uint64) { for i := 0; i < itr.n; i++ { - bitmapID := itr.bitmapIDs[i] - profileID := itr.profileIDs[i] + rowID := itr.rowIDs[i] + columnID := itr.columnIDs[i] - if (bseek == bitmapID && pseek <= profileID) || bseek < bitmapID { + if (bseek == rowID && pseek <= columnID) || bseek < rowID { itr.i = i return } @@ -144,20 +144,20 @@ func (itr *SliceIterator) Seek(bseek, pseek uint64) { itr.i = itr.n } -// Next returns the next bitmap/profile ID pair. -func (itr *SliceIterator) Next() (bitmapID, profileID uint64, eof bool) { +// Next returns the next row/column ID pair. +func (itr *SliceIterator) Next() (rowID, columnID uint64, eof bool) { if itr.i >= itr.n { return 0, 0, true } - bitmapID = itr.bitmapIDs[itr.i] - profileID = itr.profileIDs[itr.i] + rowID = itr.rowIDs[itr.i] + columnID = itr.columnIDs[itr.i] itr.i++ - return bitmapID, profileID, false + return rowID, columnID, false } -// RoaringIterator converts a roaring.Iterator to output profile/bitmap pairs. +// RoaringIterator converts a roaring.Iterator to output column/row pairs. type RoaringIterator struct { itr *roaring.Iterator } @@ -173,8 +173,8 @@ func (itr *RoaringIterator) Seek(bseek, pseek uint64) { itr.itr.Seek((bseek * SliceWidth) + pseek) } -// Next returns the next profile/bitmap ID pair. -func (itr *RoaringIterator) Next() (bitmapID, profileID uint64, eof bool) { +// Next returns the next column/row ID pair. 
+func (itr *RoaringIterator) Next() (rowID, columnID uint64, eof bool) { v, eof := itr.itr.Next() return v / SliceWidth, v % SliceWidth, eof } diff --git a/pilosa.go b/pilosa.go index b754c0e48..16739fd7a 100644 --- a/pilosa.go +++ b/pilosa.go @@ -11,9 +11,9 @@ import ( var ( ErrHostRequired = errors.New("host required") - ErrDatabaseRequired = errors.New("database required") - ErrDatabaseExists = errors.New("database already exists") - ErrDatabaseNotFound = errors.New("database not found") + ErrIndexRequired = errors.New("index required") + ErrIndexExists = errors.New("index already exists") + ErrIndexNotFound = errors.New("index not found") // ErrFrameRequired is returned when no frame is specified. ErrFrameRequired = errors.New("frame required") @@ -24,65 +24,65 @@ var ( ErrInvalidView = errors.New("invalid view") ErrInvalidCacheType = errors.New("invalid cache type") - ErrName = errors.New("invalid database or frame's name, must match [a-z0-9_-]") + ErrName = errors.New("invalid index or frame's name, must match [a-z0-9_-]") // ErrFragmentNotFound is returned when a fragment does not exist. ErrFragmentNotFound = errors.New("fragment not found") ErrQueryRequired = errors.New("query required") ) -// Regular expression to valuate db and frame's name +// Regular expression to valuate index and frame's name // Todo: remove . when frame doesn't require . for topN var nameRegexp = regexp.MustCompile(`^[a-z0-9][a-z0-9._-]{0,64}$`) -// Profile represents vertical column in a database. -// A profile can have a set of attributes attached to it. -type Profile struct { +// ColumnAttrSet represents a set of attributes for a vertical column in an index. +// Can have a set of attributes attached to it. +type ColumnAttrSet struct { ID uint64 `json:"id"` Attrs map[string]interface{} `json:"attrs,omitempty"` } -// encodeProfiles converts a into its internal representation. 
-func encodeProfiles(a []*Profile) []*internal.Profile { - other := make([]*internal.Profile, len(a)) +// encodeColumnAttrSets converts a into its internal representation. +func encodeColumnAttrSets(a []*ColumnAttrSet) []*internal.ColumnAttrSet { + other := make([]*internal.ColumnAttrSet, len(a)) for i := range a { - other[i] = encodeProfile(a[i]) + other[i] = encodeColumnAttrSet(a[i]) } return other } -// decodeProfiles converts a from its internal representation. -func decodeProfiles(a []*internal.Profile) []*Profile { - other := make([]*Profile, len(a)) +// decodeColumnAttrSets converts a from its internal representation. +func decodeColumnAttrSets(a []*internal.ColumnAttrSet) []*ColumnAttrSet { + other := make([]*ColumnAttrSet, len(a)) for i := range a { - other[i] = decodeProfile(a[i]) + other[i] = decodeColumnAttrSet(a[i]) } return other } -// encodeProfile converts p into its internal representation. -func encodeProfile(p *Profile) *internal.Profile { - return &internal.Profile{ - ID: p.ID, - Attrs: encodeAttrs(p.Attrs), +// encodeColumnAttrSet converts set into its internal representation. +func encodeColumnAttrSet(set *ColumnAttrSet) *internal.ColumnAttrSet { + return &internal.ColumnAttrSet{ + ID: set.ID, + Attrs: encodeAttrs(set.Attrs), } } -// decodeProfile converts b from its internal representation. -func decodeProfile(pb *internal.Profile) *Profile { - p := &Profile{ +// decodeColumnAttrSet converts b from its internal representation. +func decodeColumnAttrSet(pb *internal.ColumnAttrSet) *ColumnAttrSet { + set := &ColumnAttrSet{ ID: pb.ID, } if len(pb.Attrs) > 0 { - p.Attrs = make(map[string]interface{}, len(pb.Attrs)) + set.Attrs = make(map[string]interface{}, len(pb.Attrs)) for _, attr := range pb.Attrs { k, v := decodeAttr(attr) - p.Attrs[k] = v + set.Attrs[k] = v } } - return p + return set } // TimeFormat is the go-style time format used to parse string dates. 
diff --git a/server.go b/server.go index b22744bae..6c45dccd5 100644 --- a/server.go +++ b/server.go @@ -26,15 +26,7 @@ const ( DefaultPollingInterval = 60 * time.Second ) -// StateHandler specifies two methods which an object must implement to share -// state in the cluster. These are used by the GossipNodeSet to implement the -// LocalState and MergeRemoteState methods of memberlist.Delegate -type StateHandler interface { - LocalState() (proto.Message, error) - HandleRemoteState(proto.Message) error -} - -// Server represents an index wrapped by a running HTTP server. +// Server represents a holder wrapped by a running HTTP server. type Server struct { ln net.Listener @@ -43,7 +35,7 @@ type Server struct { closing chan struct{} // Data storage and HTTP interface. - Index *Index + Holder *Holder Handler *Handler Broadcaster Broadcaster BroadcastReceiver BroadcastReceiver @@ -66,7 +58,7 @@ func NewServer() *Server { s := &Server{ closing: make(chan struct{}), - Index: NewIndex(), + Holder: NewHolder(), Handler: NewHandler(), Broadcaster: NopBroadcaster, BroadcastReceiver: NopBroadcastReceiver, @@ -78,8 +70,7 @@ func NewServer() *Server { LogOutput: os.Stderr, } - s.Handler.Index = s.Index - s.Handler.ServerHandler = s + s.Handler.Holder = s.Holder return s } @@ -109,8 +100,8 @@ func (s *Server) Open() error { s.Cluster.Nodes = []*Node{{Host: s.Host}} } - // Open index. - if err := s.Index.Open(); err != nil { + // Open holder. + if err := s.Holder.Open(); err != nil { return err } @@ -125,20 +116,21 @@ func (s *Server) Open() error { // Create executor for executing queries. e := NewExecutor() - e.Index = s.Index + e.Holder = s.Holder e.Host = s.Host e.Cluster = s.Cluster // Initialize HTTP handler. s.Handler.Broadcaster = s.Broadcaster + s.Handler.StatusHandler = s s.Handler.Host = s.Host s.Handler.Cluster = s.Cluster s.Handler.Executor = e s.Handler.LogOutput = s.LogOutput - // Initialize Index. 
- s.Index.Broadcaster = s.Broadcaster - s.Index.LogOutput = s.LogOutput + // Initialize Holder. + s.Holder.Broadcaster = s.Broadcaster + s.Holder.LogOutput = s.LogOutput // Serve HTTP. go func() { http.Serve(ln, s.Handler) }() @@ -161,8 +153,8 @@ func (s *Server) Close() error { if s.ln != nil { s.ln.Close() } - if s.Index != nil { - s.Index.Close() + if s.Holder != nil { + s.Holder.Close() } return nil @@ -182,7 +174,7 @@ func (s *Server) monitorAntiEntropy() { ticker := time.NewTicker(s.AntiEntropyInterval) defer ticker.Stop() - s.logger().Printf("index sync monitor initializing (%s interval)", s.AntiEntropyInterval) + s.logger().Printf("holder sync monitor initializing (%s interval)", s.AntiEntropyInterval) for { // Wait for tick or a close. @@ -192,23 +184,23 @@ func (s *Server) monitorAntiEntropy() { case <-ticker.C: } - s.logger().Printf("index sync beginning") + s.logger().Printf("holder sync beginning") - // Initialize syncer with local index and remote client. - var syncer IndexSyncer - syncer.Index = s.Index + // Initialize syncer with local holder and remote client. + var syncer HolderSyncer + syncer.Holder = s.Holder syncer.Host = s.Host syncer.Cluster = s.Cluster syncer.Closing = s.closing - // Sync indexes. - if err := syncer.SyncIndex(); err != nil { - s.logger().Printf("index sync error: err=%s", err) + // Sync holders. + if err := syncer.SyncHolder(); err != nil { + s.logger().Printf("holder sync error: err=%s", err) continue } // Record successful sync in log. 
- s.logger().Printf("index sync complete") + s.logger().Printf("holder sync complete") } } @@ -229,20 +221,20 @@ func (s *Server) monitorMaxSlices() { case <-ticker.C: } - oldmaxslices := s.Index.MaxSlices() + oldmaxslices := s.Holder.MaxSlices() for _, node := range s.Cluster.Nodes { if s.Host != node.Host { maxSlices, _ := checkMaxSlices(node.Host) - for db, newmax := range maxSlices { - // if we don't know about a db locally, log an error because - // db's should be created and synced prior to slice creation - if localdb := s.Index.DB(db); localdb != nil { - if newmax > oldmaxslices[db] { - oldmaxslices[db] = newmax - localdb.SetRemoteMaxSlice(newmax) + for index, newmax := range maxSlices { + // if we don't know about an index locally, log an error because + // indexes should be created and synced prior to slice creation + if localIndex := s.Holder.Index(index); localIndex != nil { + if newmax > oldmaxslices[index] { + oldmaxslices[index] = newmax + localIndex.SetRemoteMaxSlice(newmax) } } else { - s.logger().Printf("Local DB not found: %s", db) + s.logger().Printf("Local Index not found: %s", index) } } } @@ -253,90 +245,104 @@ func (s *Server) monitorMaxSlices() { func (s *Server) ReceiveMessage(pb proto.Message) error { switch obj := pb.(type) { case *internal.CreateSliceMessage: - d := s.Index.DB(obj.DB) - if d == nil { - return fmt.Errorf("Local DB not found: %s", obj.DB) + idx := s.Holder.Index(obj.Index) + if idx == nil { + return fmt.Errorf("Local Index not found: %s", obj.Index) } - d.SetRemoteMaxSlice(obj.Slice) - case *internal.CreateDBMessage: - opt := DBOptions{ColumnLabel: obj.Meta.ColumnLabel} - _, err := s.Index.CreateDB(obj.DB, opt) + idx.SetRemoteMaxSlice(obj.Slice) + case *internal.CreateIndexMessage: + opt := IndexOptions{ColumnLabel: obj.Meta.ColumnLabel} + _, err := s.Holder.CreateIndex(obj.Index, opt) if err != nil { return err } - case *internal.DeleteDBMessage: - if err := s.Index.DeleteDB(obj.DB); err != nil { + case 
*internal.DeleteIndexMessage: + if err := s.Holder.DeleteIndex(obj.Index); err != nil { return err } case *internal.CreateFrameMessage: - db := s.Index.DB(obj.DB) + index := s.Holder.Index(obj.Index) opt := FrameOptions{RowLabel: obj.Meta.RowLabel} - _, err := db.CreateFrame(obj.Frame, opt) + _, err := index.CreateFrame(obj.Frame, opt) if err != nil { return err } case *internal.DeleteFrameMessage: - db := s.Index.DB(obj.DB) - if err := db.DeleteFrame(obj.Frame); err != nil { + index := s.Holder.Index(obj.Index) + if err := index.DeleteFrame(obj.Frame); err != nil { return err } } return nil } -// LocalState returns the state of the local node as well as the -// index (dbs/frames) according to the local node. -// Server implements gossip.StateHandler. +// Server implements StatusHandler. +// LocalStatus returns the state of the local node as well as the +// holder (indexes/frames) according to the local node. // In a gossip implementation, memberlist.Delegate.LocalState() uses this. -func (s *Server) LocalState() (proto.Message, error) { - if s.Index == nil { - return nil, errors.New("Server.Index is nil") - } - - // Get Node DB Slices - for _, db := range s.Index.DBs() { - maxSlice := db.MaxSlice() - slices := s.Cluster.OwnsSlices(db.name, maxSlice, s.Host) - fmt.Println("Slices ", slices) +func (s *Server) LocalStatus() (proto.Message, error) { + if s.Holder == nil { + return nil, errors.New("Server.Holder is nil.") } + return &internal.NodeStatus{ + Host: s.Host, + State: NodeStateUp, + Indexes: encodeIndexes(s.Holder.Indexes()), ns := internal.NodeState{ - Host: s.Host, - State: "OK", // TODO: make this work, pull from s.Cluster.Node - DBs: encodeDBs(s.Index.DBs()), } s.Cluster.SetNodeState(&ns) return &ns, nil } -// HandleRemoteState receives incoming NodeState from remote nodes. 
-func (s *Server) HandleRemoteState(pb proto.Message) error { - return s.mergeRemoteState(pb.(*internal.NodeState)) +// ClusterStatus returns the NodeState for all nodes in the cluster. +func (s *Server) ClusterStatus() (proto.Message, error) { + // Update local Node.state. + ns, err := s.LocalStatus() + if err != nil { + return nil, err + } + node := s.Cluster.NodeByHost(s.Host) + node.SetStatus(ns.(*internal.NodeStatus)) + + // Update NodeState for all nodes. + for host, nodeState := range s.Cluster.NodeStates() { + node := s.Cluster.NodeByHost(host) + node.SetState(nodeState) + } + + return s.Cluster.Status(), nil +} + +// HandleRemoteStatus receives incoming NodeState from remote nodes. +func (s *Server) HandleRemoteStatus(pb proto.Message) error { + return s.mergeRemoteStatus(pb.(*internal.NodeStatus)) } -func (s *Server) mergeRemoteState(ns *internal.NodeState) error { - // store this node's state in the cluster node map - s.Cluster.SetNodeState(ns) +func (s *Server) mergeRemoteStatus(ns *internal.NodeStatus) error { + // Update Node.state. + node := s.Cluster.NodeByHost(ns.Host) + node.SetStatus(ns) - // Create databases that don't exist. - for _, db := range ns.DBs { - opt := DBOptions{ - ColumnLabel: db.Meta.ColumnLabel, - TimeQuantum: TimeQuantum(db.Meta.TimeQuantum), + // Create indexes that don't exist. + for _, index := range ns.Indexes { + opt := IndexOptions{ + ColumnLabel: index.Meta.ColumnLabel, + TimeQuantum: TimeQuantum(index.Meta.TimeQuantum), } - d, err := s.Index.CreateDBIfNotExists(db.Name, opt) + idx, err := s.Holder.CreateIndexIfNotExists(index.Name, opt) if err != nil { return err } // Create frames that don't exist. 
- for _, f := range db.Frames { + for _, f := range index.Frames { opt := FrameOptions{ RowLabel: f.Meta.RowLabel, TimeQuantum: TimeQuantum(f.Meta.TimeQuantum), CacheSize: f.Meta.CacheSize, } - _, err := d.CreateFrameIfNotExists(f.Name, opt) + _, err := idx.CreateFrameIfNotExists(f.Name, opt) if err != nil { return err } @@ -408,12 +414,21 @@ func (s *Server) monitorRuntime() { return case <-gcn.AfterGC(): // GC just ran - s.Index.Stats.Count("garbage_collection", 1) + s.Holder.Stats.Count("garbage_collection", 1) case <-ticker.C: } // Record the number of go routines - s.Index.Stats.Gauge("goroutines", float64(runtime.NumGoroutine())) + s.Holder.Stats.Gauge("goroutines", float64(runtime.NumGoroutine())) } } } + +// StatusHandler specifies two methods which an object must implement to share +// state in the cluster. These are used by the GossipNodeSet to implement the +// LocalState and MergeRemoteState methods of memberlist.Delegate +type StatusHandler interface { + LocalStatus() (proto.Message, error) + ClusterStatus() (proto.Message, error) + HandleRemoteStatus(proto.Message) error +} diff --git a/server/server.go b/server/server.go index 7e4171d3d..1f52c0f48 100644 --- a/server/server.go +++ b/server/server.go @@ -1,4 +1,4 @@ -// package server contains the `pilosa server` subcommand which runs Pilosa +// Package server contains the `pilosa server` subcommand which runs Pilosa // itself. The purpose of this package is to define an easily tested Command // object which handles interpreting configuration and setting up all the // objects that Pilosa needs. @@ -51,7 +51,7 @@ type Command struct { Done chan struct{} } -// NewMain returns a new instance of Main. +// NewCommand returns a new instance of Main. 
func NewCommand(stdin io.Reader, stdout, stderr io.Writer) *Command { return &Command{ Server: pilosa.NewServer(), @@ -90,6 +90,7 @@ func (m *Command) Run(args ...string) (err error) { return nil } +// SetupServer use the cluster configuration to setup this server func (m *Command) SetupServer() error { var err error cluster := pilosa.NewCluster() @@ -117,11 +118,11 @@ func (m *Command) SetupServer() error { m.Server.LogOutput = logFile } - // Configure index. + // Configure holder. fmt.Fprintf(m.Stderr, "Using data from: %s\n", m.Config.DataDir) - m.Server.Index.Path = m.Config.DataDir + m.Server.Holder.Path = m.Config.DataDir m.Server.MetricInterval = time.Duration(m.Config.Metric.PollingInterval) - m.Server.Index.Stats, err = NewStatsClient(m.Config.Metric.Service, m.Config.Metric.Host) + m.Server.Holder.Stats, err = NewStatsClient(m.Config.Metric.Service, m.Config.Metric.Host) if err != nil { return err } @@ -168,8 +169,12 @@ func (m *Command) SetupServer() error { m.Server.Broadcaster = pilosa.NopBroadcaster m.Server.Cluster.NodeSet = pilosa.NewStaticNodeSet() m.Server.BroadcastReceiver = pilosa.NopBroadcastReceiver + err := m.Server.Cluster.NodeSet.(*pilosa.StaticNodeSet).Join(m.Server.Cluster.Nodes) + if err != nil { + return err + } default: - return fmt.Errorf("'%v' is not a supported value for broadcaster type.", m.Config.Cluster.Type) + return fmt.Errorf("'%v' is not a supported value for broadcaster type", m.Config.Cluster.Type) } // Set configuration options. diff --git a/server/server_test.go b/server/server_test.go index 8cd7c1d24..4e3fa5e36 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -39,29 +39,29 @@ func TestMain_Set_Quick(t *testing.T) { // Execute SetBit() commands. 
for _, cmd := range cmds { - if err := client.CreateDB(context.Background(), "d", pilosa.DBOptions{}); err != nil && err != pilosa.ErrDatabaseExists { + if err := client.CreateIndex(context.Background(), "i", pilosa.IndexOptions{}); err != nil && err != pilosa.ErrIndexExists { t.Fatal(err) } - if err := client.CreateFrame(context.Background(), "d", cmd.Frame, pilosa.FrameOptions{}); err != nil && err != pilosa.ErrFrameExists { + if err := client.CreateFrame(context.Background(), "i", cmd.Frame, pilosa.FrameOptions{}); err != nil && err != pilosa.ErrFrameExists { t.Fatal(err) } - if _, err := m.Query("d", "", fmt.Sprintf(`SetBit(id=%d, frame=%q, profileID=%d)`, cmd.ID, cmd.Frame, cmd.ProfileID)); err != nil { + if _, err := m.Query("i", "", fmt.Sprintf(`SetBit(id=%d, frame=%q, columnID=%d)`, cmd.ID, cmd.Frame, cmd.ColumnID)); err != nil { t.Fatal(err) } } // Validate data. for frame, frameSet := range SetCommands(cmds).Frames() { - for id, profileIDs := range frameSet { + for id, columnIDs := range frameSet { exp := MustMarshalJSON(map[string]interface{}{ "results": []interface{}{ map[string]interface{}{ - "bits": profileIDs, + "bits": columnIDs, "attrs": map[string]interface{}{}, }, }, }) + "\n" - if res, err := m.Query("d", "", fmt.Sprintf(`Bitmap(id=%d, frame=%q)`, id, frame)); err != nil { + if res, err := m.Query("i", "", fmt.Sprintf(`Bitmap(id=%d, frame=%q)`, id, frame)); err != nil { t.Fatal(err) } else if res != exp { t.Fatalf("unexpected result:\n\ngot=%s\n\nexp=%s\n\n", res, exp) @@ -75,16 +75,16 @@ func TestMain_Set_Quick(t *testing.T) { // Validate data after reopening. 
for frame, frameSet := range SetCommands(cmds).Frames() { - for id, profileIDs := range frameSet { + for id, columnIDs := range frameSet { exp := MustMarshalJSON(map[string]interface{}{ "results": []interface{}{ map[string]interface{}{ - "bits": profileIDs, + "bits": columnIDs, "attrs": map[string]interface{}{}, }, }, }) + "\n" - if res, err := m.Query("d", "", fmt.Sprintf(`Bitmap(id=%d, frame=%q)`, id, frame)); err != nil { + if res, err := m.Query("i", "", fmt.Sprintf(`Bitmap(id=%d, frame=%q)`, id, frame)); err != nil { t.Fatal(err) } else if res != exp { t.Fatalf("unexpected result (reopen):\n\ngot=%s\n\nexp=%s\n\n", res, exp) @@ -102,54 +102,54 @@ func TestMain_Set_Quick(t *testing.T) { } } -// Ensure program can set bitmap attributes and retrieve them. -func TestMain_SetBitmapAttrs(t *testing.T) { +// Ensure program can set row attributes and retrieve them. +func TestMain_SetRowAttrs(t *testing.T) { m := MustRunMain() defer m.Close() // Create frames. client := m.Client() - if err := client.CreateDB(context.Background(), "d", pilosa.DBOptions{}); err != nil && err != pilosa.ErrDatabaseExists { + if err := client.CreateIndex(context.Background(), "i", pilosa.IndexOptions{}); err != nil && err != pilosa.ErrIndexExists { t.Fatal(err) - } else if err := client.CreateFrame(context.Background(), "d", "x.n", pilosa.FrameOptions{}); err != nil { + } else if err := client.CreateFrame(context.Background(), "i", "x.n", pilosa.FrameOptions{}); err != nil { t.Fatal(err) - } else if err := client.CreateFrame(context.Background(), "d", "z", pilosa.FrameOptions{}); err != nil { + } else if err := client.CreateFrame(context.Background(), "i", "z", pilosa.FrameOptions{}); err != nil { t.Fatal(err) - } else if err := client.CreateFrame(context.Background(), "d", "neg", pilosa.FrameOptions{}); err != nil { + } else if err := client.CreateFrame(context.Background(), "i", "neg", pilosa.FrameOptions{}); err != nil { t.Fatal(err) } - // Set bits on different bitmaps in different 
frames. - if _, err := m.Query("d", "", `SetBit(id=1, frame="x.n", profileID=100)`); err != nil { + // Set bits on different rows in different frames. + if _, err := m.Query("i", "", `SetBit(id=1, frame="x.n", columnID=100)`); err != nil { t.Fatal(err) - } else if _, err := m.Query("d", "", `SetBit(id=2, frame="x.n", profileID=100)`); err != nil { + } else if _, err := m.Query("i", "", `SetBit(id=2, frame="x.n", columnID=100)`); err != nil { t.Fatal(err) - } else if _, err := m.Query("d", "", `SetBit(id=2, frame="z", profileID=100)`); err != nil { + } else if _, err := m.Query("i", "", `SetBit(id=2, frame="z", columnID=100)`); err != nil { t.Fatal(err) - } else if _, err := m.Query("d", "", `SetBit(id=3, frame="neg", profileID=100)`); err != nil { + } else if _, err := m.Query("i", "", `SetBit(id=3, frame="neg", columnID=100)`); err != nil { t.Fatal(err) } - // Set bitmap attributes. - if _, err := m.Query("d", "", `SetBitmapAttrs(id=1, frame="x.n", x=100)`); err != nil { + // Set row attributes. + if _, err := m.Query("i", "", `SetRowAttrs(id=1, frame="x.n", x=100)`); err != nil { t.Fatal(err) - } else if _, err := m.Query("d", "", `SetBitmapAttrs(id=2, frame="x.n", x=-200)`); err != nil { + } else if _, err := m.Query("i", "", `SetRowAttrs(id=2, frame="x.n", x=-200)`); err != nil { t.Fatal(err) - } else if _, err := m.Query("d", "", `SetBitmapAttrs(id=2, frame="z", x=300)`); err != nil { + } else if _, err := m.Query("i", "", `SetRowAttrs(id=2, frame="z", x=300)`); err != nil { t.Fatal(err) - } else if _, err := m.Query("d", "", `SetBitmapAttrs(id=3, frame="neg", x=-0.44)`); err != nil { + } else if _, err := m.Query("i", "", `SetRowAttrs(id=3, frame="neg", x=-0.44)`); err != nil { t.Fatal(err) } - // Query bitmap x.n/1. - if res, err := m.Query("d", "", `Bitmap(id=1, frame="x.n")`); err != nil { + // Query row x.n/1. 
+ if res, err := m.Query("i", "", `Bitmap(id=1, frame="x.n")`); err != nil { t.Fatal(err) } else if res != `{"results":[{"attrs":{"x":100},"bits":[100]}]}`+"\n" { t.Fatalf("unexpected result: %s", res) } - // Query bitmap x.n/2. - if res, err := m.Query("d", "", `Bitmap(id=2, frame="x.n")`); err != nil { + // Query row x.n/2. + if res, err := m.Query("i", "", `Bitmap(id=2, frame="x.n")`); err != nil { t.Fatal(err) } else if res != `{"results":[{"attrs":{"x":-200},"bits":[100]}]}`+"\n" { t.Fatalf("unexpected result: %s", res) @@ -159,55 +159,55 @@ func TestMain_SetBitmapAttrs(t *testing.T) { t.Fatal(err) } - // Query bitmaps after reopening. - if res, err := m.Query("d", "profiles=true", `Bitmap(id=1, frame="x.n")`); err != nil { + // Query rows after reopening. + if res, err := m.Query("i", "columnAttrs=true", `Bitmap(id=1, frame="x.n")`); err != nil { t.Fatal(err) } else if res != `{"results":[{"attrs":{"x":100},"bits":[100]}]}`+"\n" { t.Fatalf("unexpected result(reopen): %s", res) } - if res, err := m.Query("d", "profiles=true", `Bitmap(id=3, frame="neg")`); err != nil { + if res, err := m.Query("i", "columnAttrs=true", `Bitmap(id=3, frame="neg")`); err != nil { t.Fatal(err) } else if res != `{"results":[{"attrs":{"x":-0.44},"bits":[100]}]}`+"\n" { t.Fatalf("unexpected result(reopen): %s", res) } - // Query bitmap x.n/2. - if res, err := m.Query("d", "", `Bitmap(id=2, frame="x.n")`); err != nil { + // Query row x.n/2. + if res, err := m.Query("i", "", `Bitmap(id=2, frame="x.n")`); err != nil { t.Fatal(err) } else if res != `{"results":[{"attrs":{"x":-200},"bits":[100]}]}`+"\n" { t.Fatalf("unexpected result: %s", res) } } -// Ensure program can set profile attributes and retrieve them. -func TestMain_SetProfileAttrs(t *testing.T) { +// Ensure program can set column attributes and retrieve them. +func TestMain_SetColumnAttrs(t *testing.T) { m := MustRunMain() defer m.Close() // Create frames. 
client := m.Client() - if err := client.CreateDB(context.Background(), "d", pilosa.DBOptions{}); err != nil && err != pilosa.ErrDatabaseExists { + if err := client.CreateIndex(context.Background(), "i", pilosa.IndexOptions{}); err != nil && err != pilosa.ErrIndexExists { t.Fatal(err) - } else if err := client.CreateFrame(context.Background(), "d", "x.n", pilosa.FrameOptions{}); err != nil { + } else if err := client.CreateFrame(context.Background(), "i", "x.n", pilosa.FrameOptions{}); err != nil { t.Fatal(err) } - // Set bits on bitmap. - if _, err := m.Query("d", "", `SetBit(id=1, frame="x.n", profileID=100)`); err != nil { + // Set bits on row. + if _, err := m.Query("i", "", `SetBit(id=1, frame="x.n", columnID=100)`); err != nil { t.Fatal(err) - } else if _, err := m.Query("d", "", `SetBit(id=1, frame="x.n", profileID=101)`); err != nil { + } else if _, err := m.Query("i", "", `SetBit(id=1, frame="x.n", columnID=101)`); err != nil { t.Fatal(err) } - // Set profile attributes. - if _, err := m.Query("d", "", `SetProfileAttrs(id=100, foo="bar")`); err != nil { + // Set column attributes. + if _, err := m.Query("i", "", `SetColumnAttrs(id=100, foo="bar")`); err != nil { t.Fatal(err) } - // Query bitmap. - if res, err := m.Query("d", "profiles=true", `Bitmap(id=1, frame="x.n")`); err != nil { + // Query row. + if res, err := m.Query("i", "columnAttrs=true", `Bitmap(id=1, frame="x.n")`); err != nil { t.Fatal(err) - } else if res != `{"results":[{"attrs":{},"bits":[100,101]}],"profiles":[{"id":100,"attrs":{"foo":"bar"}}]}`+"\n" { + } else if res != `{"results":[{"attrs":{},"bits":[100,101]}],"columnAttrs":[{"id":100,"attrs":{"foo":"bar"}}]}`+"\n" { t.Fatalf("unexpected result: %s", res) } @@ -215,43 +215,43 @@ func TestMain_SetProfileAttrs(t *testing.T) { t.Fatal(err) } - // Query bitmap after reopening. - if res, err := m.Query("d", "profiles=true", `Bitmap(id=1, frame="x.n")`); err != nil { + // Query row after reopening. 
+ if res, err := m.Query("i", "columnAttrs=true", `Bitmap(id=1, frame="x.n")`); err != nil { t.Fatal(err) - } else if res != `{"results":[{"attrs":{},"bits":[100,101]}],"profiles":[{"id":100,"attrs":{"foo":"bar"}}]}`+"\n" { + } else if res != `{"results":[{"attrs":{},"bits":[100,101]}],"columnAttrs":[{"id":100,"attrs":{"foo":"bar"}}]}`+"\n" { t.Fatalf("unexpected result(reopen): %s", res) } } -// Ensure program can set profile attributes with columnLabel option. -func TestMain_SetProfileAttrsWithColumnOption(t *testing.T) { +// Ensure program can set column attributes with columnLabel option. +func TestMain_SetColumnAttrsWithColumnOption(t *testing.T) { m := MustRunMain() defer m.Close() // Create frames. client := m.Client() - if err := client.CreateDB(context.Background(), "d", pilosa.DBOptions{ColumnLabel: "col"}); err != nil && err != pilosa.ErrDatabaseExists { + if err := client.CreateIndex(context.Background(), "i", pilosa.IndexOptions{ColumnLabel: "col"}); err != nil && err != pilosa.ErrIndexExists { t.Fatal(err) - } else if err := client.CreateFrame(context.Background(), "d", "x.n", pilosa.FrameOptions{}); err != nil { + } else if err := client.CreateFrame(context.Background(), "i", "x.n", pilosa.FrameOptions{}); err != nil { t.Fatal(err) } - // Set bits on bitmap. - if _, err := m.Query("d", "", `SetBit(id=1, frame="x.n", col=100)`); err != nil { + // Set bits on row. + if _, err := m.Query("i", "", `SetBit(id=1, frame="x.n", col=100)`); err != nil { t.Fatal(err) - } else if _, err := m.Query("d", "", `SetBit(id=1, frame="x.n", col=101)`); err != nil { + } else if _, err := m.Query("i", "", `SetBit(id=1, frame="x.n", col=101)`); err != nil { t.Fatal(err) } - // Set profile attributes. - if _, err := m.Query("d", "", `SetProfileAttrs(col=100, foo="bar")`); err != nil { + // Set column attributes. + if _, err := m.Query("i", "", `SetColumnAttrs(col=100, foo="bar")`); err != nil { t.Fatal(err) } - // Query bitmap. 
- if res, err := m.Query("d", "profiles=true", `Bitmap(id=1, frame="x.n")`); err != nil { + // Query row. + if res, err := m.Query("i", "columnAttrs=true", `Bitmap(id=1, frame="x.n")`); err != nil { t.Fatal(err) - } else if res != `{"results":[{"attrs":{},"bits":[100,101]}],"profiles":[{"id":100,"attrs":{"foo":"bar"}}]}`+"\n" { + } else if res != `{"results":[{"attrs":{},"bits":[100,101]}],"columnAttrs":[{"id":100,"attrs":{"foo":"bar"}}]}`+"\n" { t.Fatalf("unexpected result: %s", res) } @@ -274,27 +274,27 @@ func TestMain_FrameRestore(t *testing.T) { // Create frames. client := m0.Client() - if err := client.CreateDB(context.Background(), "d", pilosa.DBOptions{}); err != nil && err != pilosa.ErrDatabaseExists { + if err := client.CreateIndex(context.Background(), "x", pilosa.IndexOptions{}); err != nil && err != pilosa.ErrIndexExists { t.Fatal(err) - } else if err := client.CreateFrame(context.Background(), "d", "f", pilosa.FrameOptions{}); err != nil { + } else if err := client.CreateFrame(context.Background(), "x", "f", pilosa.FrameOptions{}); err != nil { t.Fatal(err) } // Write data on first cluster. - if _, err := m0.Query("d", "", ` - SetBit(id=1, frame="f", profileID=100) - SetBit(id=1, frame="f", profileID=1000) - SetBit(id=1, frame="f", profileID=100000) - SetBit(id=1, frame="f", profileID=200000) - SetBit(id=1, frame="f", profileID=400000) - SetBit(id=1, frame="f", profileID=600000) - SetBit(id=1, frame="f", profileID=800000) + if _, err := m0.Query("x", "", ` + SetBit(id=1, frame="f", columnID=100) + SetBit(id=1, frame="f", columnID=1000) + SetBit(id=1, frame="f", columnID=100000) + SetBit(id=1, frame="f", columnID=200000) + SetBit(id=1, frame="f", columnID=400000) + SetBit(id=1, frame="f", columnID=600000) + SetBit(id=1, frame="f", columnID=800000) `); err != nil { t.Fatal(err) } - // Query bitmap on first cluster. - if res, err := m0.Query("d", "", `Bitmap(id=1, frame="f")`); err != nil { + // Query row on first cluster. 
+ if res, err := m0.Query("x", "", `Bitmap(id=1, frame="f")`); err != nil { t.Fatal(err) } else if res != `{"results":[{"attrs":{},"bits":[100,1000,100000,200000,400000,600000,800000]}]}`+"\n" { t.Fatalf("unexpected result: %s", res) @@ -308,16 +308,16 @@ func TestMain_FrameRestore(t *testing.T) { client, err := pilosa.NewClient(m2.Server.Host) if err != nil { t.Fatal(err) - } else if err := m2.Client().CreateDB(context.Background(), "d", pilosa.DBOptions{}); err != nil && err != pilosa.ErrDatabaseExists { + } else if err := m2.Client().CreateIndex(context.Background(), "x", pilosa.IndexOptions{}); err != nil && err != pilosa.ErrIndexExists { t.Fatal(err) - } else if err := m2.Client().CreateFrame(context.Background(), "d", "f", pilosa.FrameOptions{}); err != nil { + } else if err := m2.Client().CreateFrame(context.Background(), "x", "f", pilosa.FrameOptions{}); err != nil { t.Fatal(err) - } else if err := client.RestoreFrame(context.Background(), m0.Server.Host, "d", "f"); err != nil { + } else if err := client.RestoreFrame(context.Background(), m0.Server.Host, "x", "f"); err != nil { t.Fatal(err) } - // Query bitmap on second cluster. - if res, err := m2.Query("d", "", `Bitmap(id=1, frame="f")`); err != nil { + // Query row on second cluster. + if res, err := m2.Query("x", "", `Bitmap(id=1, frame="f")`); err != nil { t.Fatal(err) } else if res != `{"results":[{"attrs":{},"bits":[100,1000,100000,200000,400000,600000,800000]}]}`+"\n" { t.Fatalf("unexpected result: %s", res) @@ -431,8 +431,9 @@ func (m *Main) Client() *pilosa.Client { } // Query executes a query against the program through the HTTP API. 
-func (m *Main) Query(db, rawQuery, query string) (string, error) { - resp := MustDo("POST", m.URL()+fmt.Sprintf("/db/%s/query?", db)+rawQuery, query) +func (m *Main) Query(index, rawQuery, query string) (string, error) { + fmt.Println("Query:", index, query) + resp := MustDo("POST", m.URL()+fmt.Sprintf("/index/%s/query?", index)+rawQuery, query) if resp.StatusCode != http.StatusOK { return "", fmt.Errorf("invalid status: %d, body=%s", resp.StatusCode, resp.Body) } @@ -441,14 +442,14 @@ func (m *Main) Query(db, rawQuery, query string) (string, error) { // SetCommand represents a command to set a bit. type SetCommand struct { - ID uint64 - Frame string - ProfileID uint64 + ID uint64 + Frame string + ColumnID uint64 } type SetCommands []SetCommand -// Frames returns the set of profile ids for each frame/bitmap. +// Frames returns the set of column ids for each frame/row. func (a SetCommands) Frames() map[string]map[uint64][]uint64 { // Create a set of unique commands. m := make(map[SetCommand]struct{}) @@ -456,16 +457,16 @@ func (a SetCommands) Frames() map[string]map[uint64][]uint64 { m[cmd] = struct{}{} } - // Build unique ids for each frame & bitmap. + // Build unique ids for each frame & row. frames := make(map[string]map[uint64][]uint64) for cmd := range m { if frames[cmd.Frame] == nil { frames[cmd.Frame] = make(map[uint64][]uint64) } - frames[cmd.Frame][cmd.ID] = append(frames[cmd.Frame][cmd.ID], cmd.ProfileID) + frames[cmd.Frame][cmd.ID] = append(frames[cmd.Frame][cmd.ID], cmd.ColumnID) } - // Sort each set of profile ids. + // Sort each set of column ids. 
for _, frame := range frames { for id := range frame { sort.Sort(uint64Slice(frame[id])) @@ -480,9 +481,9 @@ func GenerateSetCommands(n int, rand *rand.Rand) []SetCommand { cmds := make([]SetCommand, rand.Intn(n)) for i := range cmds { cmds[i] = SetCommand{ - ID: uint64(rand.Intn(1000)), - Frame: "x.n", - ProfileID: uint64(rand.Intn(10)), + ID: uint64(rand.Intn(1000)), + Frame: "x.n", + ColumnID: uint64(rand.Intn(10)), } } return cmds diff --git a/view.go b/view.go index c7ec92733..9f67b516d 100644 --- a/view.go +++ b/view.go @@ -26,7 +26,7 @@ func IsValidView(name string) bool { type View struct { mu sync.Mutex path string - db string + index string frame string name string @@ -38,15 +38,15 @@ type View struct { stats StatsClient - BitmapAttrStore *AttrStore - LogOutput io.Writer + RowAttrStore *AttrStore + LogOutput io.Writer } // NewView returns a new instance of View. -func NewView(path, db, frame, name string, cacheSize uint32) *View { +func NewView(path, index, frame, name string, cacheSize uint32) *View { return &View{ path: path, - db: db, + index: index, frame: frame, name: name, cacheSize: cacheSize, @@ -62,8 +62,8 @@ func NewView(path, db, frame, name string, cacheSize uint32) *View { // Name returns the name the view was initialized with. func (v *View) Name() string { return v.name } -// DB returns the database name the view was initialized with. -func (v *View) DB() string { return v.db } +// Index returns the index name the view was initialized with. +func (v *View) Index() string { return v.index } // Frame returns the frame name the view was initialized with. 
func (v *View) Frame() string { return v.frame } @@ -124,7 +124,7 @@ func (v *View) openFragments() error { if err := frag.Open(); err != nil { return fmt.Errorf("open fragment: slice=%s, err=%s", frag.Slice(), err) } - frag.BitmapAttrStore = v.BitmapAttrStore + frag.RowAttrStore = v.RowAttrStore v.fragments[frag.Slice()] = frag v.stats.Count("maxSlice", 1) @@ -205,7 +205,7 @@ func (v *View) createFragmentIfNotExists(slice uint64) (*Fragment, error) { if err := frag.Open(); err != nil { return nil, err } - frag.BitmapAttrStore = v.BitmapAttrStore + frag.RowAttrStore = v.RowAttrStore // Save to lookup. v.fragments[slice] = frag @@ -216,7 +216,7 @@ func (v *View) createFragmentIfNotExists(slice uint64) (*Fragment, error) { } func (v *View) newFragment(path string, slice uint64) *Fragment { - frag := NewFragment(path, v.db, v.frame, v.name, slice) + frag := NewFragment(path, v.index, v.frame, v.name, slice) frag.cacheType = v.cacheType frag.cacheSize = v.cacheSize frag.LogOutput = v.LogOutput @@ -225,23 +225,23 @@ func (v *View) newFragment(path string, slice uint64) *Fragment { } // SetBit sets a bit within the view. -func (v *View) SetBit(bitmapID, profileID uint64) (changed bool, err error) { - slice := profileID / SliceWidth +func (v *View) SetBit(rowID, columnID uint64) (changed bool, err error) { + slice := columnID / SliceWidth frag, err := v.CreateFragmentIfNotExists(slice) if err != nil { return changed, err } - return frag.SetBit(bitmapID, profileID) + return frag.SetBit(rowID, columnID) } // ClearBit clears a bit within the view. 
-func (v *View) ClearBit(bitmapID, profileID uint64) (changed bool, err error) { - slice := profileID / SliceWidth +func (v *View) ClearBit(rowID, columnID uint64) (changed bool, err error) { + slice := columnID / SliceWidth frag, err := v.CreateFragmentIfNotExists(slice) if err != nil { return changed, err } - return frag.ClearBit(bitmapID, profileID) + return frag.ClearBit(rowID, columnID) } // IsInverseView returns true if the view is used for storing an inverted representation. diff --git a/view_test.go b/view_test.go index d10c90936..adaa0ced2 100644 --- a/view_test.go +++ b/view_test.go @@ -10,11 +10,11 @@ import ( // View is a test wrapper for pilosa.View. type View struct { *pilosa.View - BitmapAttrStore *AttrStore + RowAttrStore *AttrStore } // NewView returns a new instance of View with a temporary path. -func NewView(db, frame, name string) *View { +func NewView(index, frame, name string) *View { file, err := ioutil.TempFile("", "pilosa-view-") if err != nil { panic(err) @@ -22,16 +22,16 @@ func NewView(db, frame, name string) *View { file.Close() v := &View{ - View: pilosa.NewView(file.Name(), db, frame, name, pilosa.DefaultCacheSize), - BitmapAttrStore: MustOpenAttrStore(), + View: pilosa.NewView(file.Name(), index, frame, name, pilosa.DefaultCacheSize), + RowAttrStore: MustOpenAttrStore(), } - v.View.BitmapAttrStore = v.BitmapAttrStore.AttrStore + v.View.RowAttrStore = v.RowAttrStore.AttrStore return v } // MustOpenView creates and opens an view at a temporary path. Panic on error. -func MustOpenView(db, frame, name string) *View { - v := NewView(db, frame, name) +func MustOpenView(index, frame, name string) *View { + v := NewView(index, frame, name) if err := v.Open(); err != nil { panic(err) } @@ -41,7 +41,7 @@ func MustOpenView(db, frame, name string) *View { // Close closes the view and removes all underlying data. 
func (v *View) Close() error { defer os.Remove(v.Path()) - defer v.BitmapAttrStore.Close() + defer v.RowAttrStore.Close() return v.View.Close() } @@ -52,28 +52,28 @@ func (v *View) Reopen() error { return err } - v.View = pilosa.NewView(path, v.DB(), v.Frame(), v.Name(), pilosa.DefaultCacheSize) - v.View.BitmapAttrStore = v.BitmapAttrStore.AttrStore + v.View = pilosa.NewView(path, v.Index(), v.Frame(), v.Name(), pilosa.DefaultCacheSize) + v.View.RowAttrStore = v.RowAttrStore.AttrStore if err := v.Open(); err != nil { return err } return nil } -// MustSetBits sets bits on a bitmap. Panic on error. +// MustSetBits sets bits on a row. Panic on error. // This function does not accept a timestamp or quantum. -func (v *View) MustSetBits(bitmapID uint64, profileIDs ...uint64) { - for _, profileID := range profileIDs { - if _, err := v.SetBit(bitmapID, profileID); err != nil { +func (v *View) MustSetBits(rowID uint64, columnIDs ...uint64) { + for _, columnID := range columnIDs { + if _, err := v.SetBit(rowID, columnID); err != nil { panic(err) } } } -// MustClearBits clears bits on a bitmap. Panic on error. -func (v *View) MustClearBits(bitmapID uint64, profileIDs ...uint64) { - for _, profileID := range profileIDs { - if _, err := v.ClearBit(bitmapID, profileID); err != nil { +// MustClearBits clears bits on a row. Panic on error. +func (v *View) MustClearBits(rowID uint64, columnIDs ...uint64) { + for _, columnID := range columnIDs { + if _, err := v.ClearBit(rowID, columnID); err != nil { panic(err) } }