diff --git a/.idea/data-acc.iml b/.idea/data-acc.iml
index c956989b..7f988569 100644
--- a/.idea/data-acc.iml
+++ b/.idea/data-acc.iml
@@ -1,8 +1,14 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/dictionaries/john.xml b/.idea/dictionaries/john.xml
new file mode 100644
index 00000000..1aeb6e0d
--- /dev/null
+++ b/.idea/dictionaries/john.xml
@@ -0,0 +1,11 @@
+<component name="ProjectDictionaryState">
+  <dictionary name="john">
+    <words>
+      <w>ansible</w>
+      <w>dacctl</w>
+      <w>dacd</w>
+      <w>nodehostnamefile</w>
+      <w>slurm</w>
+    </words>
+  </dictionary>
+</component>
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml
new file mode 100644
index 00000000..85fa229c
--- /dev/null
+++ b/.idea/inspectionProfiles/Project_Default.xml
@@ -0,0 +1,10 @@
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/webResources.xml b/.idea/webResources.xml
new file mode 100644
index 00000000..717d9d66
--- /dev/null
+++ b/.idea/webResources.xml
@@ -0,0 +1,14 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/build/rebuild_mocks.sh b/build/rebuild_mocks.sh
index 54e21653..b92a41db 100755
--- a/build/rebuild_mocks.sh
+++ b/build/rebuild_mocks.sh
@@ -6,24 +6,33 @@ echo "Regenerate mocks:"
mkdir -p internal/pkg/mocks
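+# assumes mockgen (github.com/golang/mock) is on PATH and the internal/pkg/mock_* output directories already exist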
-items="pool volume"
+items="session session_action_handler"
for i in $items; do
- mockgen -source=internal/pkg/registry/${i}.go \
- -package mocks >internal/pkg/mocks/${i}_mock.go
+ mockgen -source=internal/pkg/facade/${i}.go \
+ >internal/pkg/mock_facade/${i}.go
done
-items="job"
+items="disk"
for i in $items; do
- mockgen -source=internal/pkg/dacctl/${i}.go \
- -package mocks >internal/pkg/mocks/${i}_mock.go
+ mockgen -source=internal/pkg/fileio/${i}.go \
+ >internal/pkg/mock_fileio/${i}_mock.go
done
-items="disk"
+items="provider ansible"
for i in $items; do
- mockgen -source=internal/pkg/fileio/${i}.go \
- -package mocks >internal/pkg/mocks/${i}_mock.go
+ mockgen -source=internal/pkg/filesystem/${i}.go \
+ >internal/pkg/mock_filesystem/${i}.go
done
-items="interface"
-mockgen -source=internal/pkg/pfsprovider/interface.go \
- -package mocks >internal/pkg/mocks/pfsprovider_mock.go
+items="brick_allocation brick_host session session_actions"
+for i in $items; do
+ mockgen -source=internal/pkg/registry/${i}.go \
+ >internal/pkg/mock_registry/${i}.go
+done
+
+items="keystore"
+for i in $items; do
+ mockgen -source=internal/pkg/store/${i}.go \
+ >internal/pkg/mock_store/${i}.go
+done
diff --git a/cmd/dac-func-test/dacctl.go b/cmd/dac-func-test/dacctl.go
deleted file mode 100644
index aa832054..00000000
--- a/cmd/dac-func-test/dacctl.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package main
-
-import (
- "github.com/RSE-Cambridge/data-acc/internal/pkg/dacctl"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/keystoreregistry"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "log"
-)
-
-func debugStatus(volumeRegistry registry.VolumeRegistry, poolRegistry registry.PoolRegistry) {
- log.Println(dacctl.GetPools(poolRegistry))
- log.Println(dacctl.GetInstances(volumeRegistry))
- log.Println(dacctl.GetSessions(volumeRegistry))
- log.Println(volumeRegistry.AllVolumes())
-}
-
-func testPersistent(volumeRegistry registry.VolumeRegistry, poolRegistry registry.PoolRegistry) {
- bufferToken := "fakebuffer1"
- bufferRequest := dacctl.BufferRequest{
- Token: bufferToken,
- Capacity: "b:10GiB",
- Persistent: true,
- Caller: "test",
- }
- debugStatus(volumeRegistry, poolRegistry)
-
- err := dacctl.CreateVolumesAndJobs(volumeRegistry, poolRegistry, bufferRequest)
- if err != nil {
- log.Fatal(err)
- }
-
- bufferRequest2 := bufferRequest
- bufferRequest2.Token = "fakebuffer2"
- err = dacctl.CreateVolumesAndJobs(volumeRegistry, poolRegistry, bufferRequest2)
- if err != nil {
- log.Fatal(err)
- }
-
- bufferRequest3 := dacctl.BufferRequest{
- Token: "fakebuffer3",
- Capacity: "a:0",
- Persistent: true,
- Caller: "test",
- }
- err = dacctl.CreateVolumesAndJobs(volumeRegistry, poolRegistry, bufferRequest3)
- if err != nil {
- log.Fatal(err)
- }
-
- debugStatus(volumeRegistry, poolRegistry)
-
- // TODO go through state machine for a given volume...?
- // TODO fix up paths, real_size, etc
- // TODO record all the data for fake data_in, etc
- // TODO add wait for actions into volume state machine
-
- log.Println(dacctl.DeleteBufferComponents(volumeRegistry, poolRegistry, bufferToken))
- log.Println(dacctl.DeleteBufferComponents(volumeRegistry, poolRegistry, "fakebuffer2"))
- log.Println(dacctl.DeleteBufferComponents(volumeRegistry, poolRegistry, "fakebuffer3"))
-
- debugStatus(volumeRegistry, poolRegistry)
-}
-
-func TestDacctl(keystore keystoreregistry.Keystore) {
- log.Println("Testing dacctl")
-
- volumeRegistry := keystoreregistry.NewVolumeRegistry(keystore)
- poolRegistry := keystoreregistry.NewPoolRegistry(keystore)
-
- testPersistent(volumeRegistry, poolRegistry)
-}
diff --git a/cmd/dac-func-test/etcdkeystore.go b/cmd/dac-func-test/etcdkeystore.go
deleted file mode 100644
index ba0496dc..00000000
--- a/cmd/dac-func-test/etcdkeystore.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package main
-
-import (
- "github.com/RSE-Cambridge/data-acc/internal/pkg/keystoreregistry"
- "log"
- "runtime"
- "time"
-)
-
-func cleanAllKeys(keystore keystoreregistry.Keystore) {
- if err := keystore.CleanPrefix(""); err != nil {
- log.Println("Error cleaning: ", err)
- }
-}
-
-func testAddValues(keystore keystoreregistry.Keystore) {
- values := []keystoreregistry.KeyValue{
- {Key: "key1", Value: "value1"},
- {Key: "key2", Value: "value2"},
- }
-
- if err := keystore.Add(values); err != nil {
- log.Fatalf("Error with add values")
- }
-
- if err := keystore.Add(values); err == nil {
- log.Fatalf("Expected an error")
- } else {
- log.Println(err)
- }
-}
-
-func testGet(keystore keystoreregistry.Keystore) {
- value, _ := keystore.Get("key1")
- log.Println(value)
- _, err := keystore.Get("key3")
- if err == nil {
- log.Fatalf("failed to raise error")
- } else {
- log.Println(err)
- }
-
- values, _ := keystore.GetAll("key")
- log.Println(values)
- _, err = keystore.GetAll("key3")
- if err == nil {
- log.Fatalf("failed to raise error")
- } else {
- log.Println(err)
- }
-}
-
-func testUpdate(keystore keystoreregistry.Keystore) {
- values, err := keystore.GetAll("key")
- if err != nil {
- log.Fatal(err)
- }
-
- values[0].Value = "asdf"
- values[1].Value = "asdf2"
-
- err = keystore.Update(values)
- if err != nil {
- log.Fatal(err)
- }
-
- // Error if ModVersion out of sync
- err = keystore.Update(values)
- if err == nil {
- log.Fatal("Failed to raise error")
- } else {
- log.Println(err)
- }
-
- // Ensure success if told to ignore ModRevision
- values[0].ModRevision = 0
- values[1].ModRevision = 0
- values[1].Key = "key3" // add value via update
- err = keystore.Update(values)
- if err != nil {
- log.Fatal(err)
- }
-}
-
-func testDeleteAll(keystore keystoreregistry.Keystore) {
- values, err := keystore.GetAll("key")
- if err != nil {
- log.Fatal(err)
- }
-
- err = keystore.DeleteAll(values)
- if err != nil {
- log.Fatal(err)
- }
-}
-
-func testKeepAlive(keystore keystoreregistry.Keystore) {
- err := keystore.KeepAliveKey("/mytesthost")
- if err != nil {
- log.Fatal(err)
- }
-
- err = keystore.KeepAliveKey("/mytesthost")
- if err == nil {
- log.Fatal("expected error")
- } else {
- log.Println(err)
- }
-}
-
-func TestEtcdKeystore(keystore keystoreregistry.Keystore) {
- log.Println("Testing etcdkeystore...")
-
- testAddValues(keystore)
- testGet(keystore)
- testUpdate(keystore)
- testDeleteAll(keystore)
- testKeepAlive(keystore)
-
- // Give background things time to finish
- time.Sleep(time.Millisecond * 100)
- runtime.Gosched()
-}
diff --git a/cmd/dac-func-test/main.go b/cmd/dac-func-test/main.go
deleted file mode 100644
index 9c19d22a..00000000
--- a/cmd/dac-func-test/main.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package main
-
-import (
- "fmt"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/etcdregistry"
-)
-
-func main() {
- keystore := etcdregistry.NewKeystore()
- defer keystore.Close()
-
- cleanAllKeys(keystore)
-
- TestEtcdKeystore(keystore)
- fmt.Println("")
-
- TestKeystorePoolRegistry(keystore)
- fmt.Println("")
-
- TestKeystoreVolumeRegistry(keystore)
- fmt.Println("")
-
- TestDacctl(keystore)
-}
diff --git a/cmd/dac-func-test/pool.go b/cmd/dac-func-test/pool.go
deleted file mode 100644
index b2536e13..00000000
--- a/cmd/dac-func-test/pool.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package main
-
-import (
- "github.com/RSE-Cambridge/data-acc/internal/pkg/keystoreregistry"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "log"
-)
-
-func testGetPools(poolRegistry registry.PoolRegistry) {
- if pools, err := poolRegistry.Pools(); err != nil {
- log.Fatal(err)
- } else {
- log.Println(pools)
- }
-}
-
-func testUpdateHost(poolRegistry registry.PoolRegistry) {
- brickInfo := []registry.BrickInfo{
- {Hostname: "foo", Device: "vbdb1", PoolName: "a", CapacityGB: 10},
- {Hostname: "foo", Device: "nvme3n1", PoolName: "b", CapacityGB: 20},
- {Hostname: "foo", Device: "nvme2n1", PoolName: "b", CapacityGB: 20},
- }
- err := poolRegistry.UpdateHost(brickInfo)
- if err != nil {
- log.Fatal(err)
- } else {
- log.Println("added some keys")
- }
-
- // Do not allow multiple hostnames to be updated
- brickInfo = []registry.BrickInfo{
- {Hostname: "foo", Device: "vbdb1", PoolName: "a", CapacityGB: 10},
- {Hostname: "bar", Device: "nvme3n1", PoolName: "b", CapacityGB: 20},
- }
- err = poolRegistry.UpdateHost(brickInfo)
- if err == nil {
- log.Fatal("expected error")
- } else {
- log.Println(err)
- }
-}
-
-func testGetBricks(poolRegistry registry.PoolRegistry) {
- if raw, err := poolRegistry.GetBrickInfo("foo", "vbdb1"); err != nil {
- log.Fatal(err)
- } else {
- log.Println(raw)
- }
-
- if _, err := poolRegistry.GetBrickInfo("asdf", "vbdb1"); err != nil {
- log.Println(err)
- } else {
- log.Fatal("expected error")
- }
-}
-
-func testGetAllocations(poolRegistry registry.PoolRegistry) {
- allocations, err := poolRegistry.GetAllocationsForHost("foo")
- if err != nil {
- log.Fatal(err)
- }
- log.Println(allocations)
-
- allocations, err = poolRegistry.GetAllocationsForVolume("vol1")
- if err != nil {
- log.Fatal(err)
- }
- log.Println(allocations)
-
- err = poolRegistry.DeallocateBricks("vol1")
- if err != nil {
- log.Fatal(err)
- }
-}
-
-func testDeleteAllocations(poolRegistry registry.PoolRegistry) {
- updatedAllocations, err := poolRegistry.GetAllocationsForVolume("vol1")
- if err != nil {
- log.Fatal(err)
- }
- err = poolRegistry.HardDeleteAllocations(updatedAllocations)
- if err != nil {
- log.Fatal(err)
- }
-}
-
-func testKeepHostAlive(poolRegistry registry.PoolRegistry) {
- err := poolRegistry.KeepAliveHost("foo")
- if err != nil {
- log.Fatal(err)
- }
- err = poolRegistry.KeepAliveHost("bar")
- if err != nil {
- log.Fatal(err)
- }
-
- err = poolRegistry.KeepAliveHost("foo")
- if err == nil {
- log.Fatal("expected error")
- } else {
- log.Println(err)
- }
-}
-
-func TestKeystorePoolRegistry(keystore keystoreregistry.Keystore) {
- log.Println("Testing keystoreregistry.pool")
-
- cleanAllKeys(keystore)
-
- poolRegistry := keystoreregistry.NewPoolRegistry(keystore)
- testUpdateHost(poolRegistry)
- testGetBricks(poolRegistry)
- testGetAllocations(poolRegistry)
- testDeleteAllocations(poolRegistry)
- testKeepHostAlive(poolRegistry)
-
- // TODO: update hosts first?
- testGetPools(poolRegistry)
-}
diff --git a/cmd/dac-func-test/volume.go b/cmd/dac-func-test/volume.go
deleted file mode 100644
index 138d5d56..00000000
--- a/cmd/dac-func-test/volume.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package main
-
-import (
- "github.com/RSE-Cambridge/data-acc/internal/pkg/keystoreregistry"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "log"
- "time"
-)
-
-func TestKeystoreVolumeRegistry(keystore keystoreregistry.Keystore) {
- log.Println("Testing keystoreregistry.volume")
- volumeRegistry := keystoreregistry.NewVolumeRegistry(keystore)
-
- testVolumeCRUD(volumeRegistry)
- testJobCRUD(volumeRegistry)
-
- // give watches time to print
- time.Sleep(time.Second)
-}
-
-func testVolumeCRUD(volRegistry registry.VolumeRegistry) {
- // TODO: test get volume changes?
-
- volume := registry.Volume{Name: "asdf", State: registry.Registered, JobName: "foo", SizeBricks: 2, SizeGB: 200}
- volume2 := registry.Volume{Name: "asdf2", JobName: "foo", SizeBricks: 3, SizeGB: 300}
- if err := volRegistry.AddVolume(volume); err != nil {
- log.Fatal(err)
- }
- if err := volRegistry.AddVolume(volume); err == nil {
- log.Fatal("expected an error")
- } else {
- log.Println(err)
- }
-
- if volume, err := volRegistry.Volume(volume.Name); err != nil {
- log.Fatal(err)
- } else {
- log.Println(volume)
- }
-
- if err := volRegistry.DeleteVolume(volume.Name); err != nil {
- log.Fatal(err)
- }
- if err := volRegistry.DeleteVolume(volume.Name); err == nil {
- log.Fatal("expected error")
- } else {
- log.Println(err)
- }
-
- // leave around for following tests
- volRegistry.AddVolume(volume)
- volRegistry.AddVolume(volume2)
-
- if err := volRegistry.UpdateState(volume.Name, registry.BricksProvisioned); err != nil {
- log.Fatal(err)
- }
- if err := volRegistry.UpdateState("badname", registry.BricksProvisioned); err == nil {
- log.Fatal("expected error")
- }
- if err := volRegistry.UpdateState(volume.Name, registry.BricksProvisioned); err == nil {
- log.Fatal("expected error with repeated update")
- }
- if err := volRegistry.UpdateState(volume.Name, registry.Unknown); err == nil {
- log.Fatal("expected error with out of order update")
- }
- volRegistry.UpdateState(volume2.Name, registry.Registered)
-
- if volumes, err := volRegistry.AllVolumes(); err != nil {
- log.Fatal(err)
- } else {
- log.Println(volumes)
- }
- // testJobCRUD uses volume and volume1
-}
-
-func testJobCRUD(volRegistry registry.VolumeRegistry) {
- job := registry.Job{Name: "foo",
- MultiJobVolumes: []registry.VolumeName{"asdf", "asdf2"},
- Owner: 1001,
- CreatedAt: uint(time.Now().Unix()),
- }
- if err := volRegistry.AddJob(job); err != nil {
- log.Fatal(err)
- }
-
- if err := volRegistry.AddJob(job); err == nil {
- log.Fatal("expected an error adding duplicate job")
- }
- badJob := registry.Job{Name: "bar", MultiJobVolumes: []registry.VolumeName{"asdf", "asdf3"}}
- if err := volRegistry.AddJob(badJob); err == nil {
- log.Fatal("expected an error for invalid volume name")
- }
-
- jobs, err := volRegistry.Jobs()
- if err != nil {
- log.Fatal(err)
- }
- log.Println(jobs)
-
- err = volRegistry.DeleteJob("foo")
- if err != nil {
- panic(err)
- }
- err = volRegistry.DeleteJob("foo")
- if err == nil {
- panic(err)
- }
-
- // remove volumes now we are done with them
- volRegistry.DeleteVolume(registry.VolumeName("asdf"))
- volRegistry.DeleteVolume(registry.VolumeName("asdf2"))
-}
diff --git a/cmd/dacctl/actions.go b/cmd/dacctl/actions.go
index 8f8d87c4..7c7c4034 100644
--- a/cmd/dacctl/actions.go
+++ b/cmd/dacctl/actions.go
@@ -1,66 +1,77 @@
package main
import (
- "github.com/RSE-Cambridge/data-acc/internal/pkg/dacctl/actions"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/etcdregistry"
+ "fmt"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/dacctl"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/dacctl/actions_impl"
"github.com/RSE-Cambridge/data-acc/internal/pkg/fileio"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/keystoreregistry"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/store"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/store_impl"
"github.com/urfave/cli"
)
-var testKeystore keystoreregistry.Keystore
+var testKeystore store.Keystore
var testDisk fileio.Disk
-var testActions actions.DacctlActions
+var testActions dacctl.DacctlActions
-func getKeystore() keystoreregistry.Keystore {
+func getKeystore() store.Keystore {
// TODO must be a better way to test this, proper factory?
if testKeystore != nil {
return testKeystore
}
- return etcdregistry.NewKeystore()
+ return store_impl.NewKeystore()
}
-func getActions(keystore keystoreregistry.Keystore) actions.DacctlActions {
+func getActions(keystore store.Keystore) dacctl.DacctlActions {
if testActions != nil {
return testActions
}
- volReg := keystoreregistry.NewVolumeRegistry(keystore)
- poolReg := keystoreregistry.NewPoolRegistry(keystore)
- disk := testDisk
- if testDisk == nil {
- disk = fileio.NewDisk()
- }
- return actions.NewDacctlActions(poolReg, volReg, disk)
+ disk := fileio.NewDisk()
+ return actions_impl.NewDacctlActions(keystore, disk)
}
func createPersistent(c *cli.Context) error {
keystore := getKeystore()
defer keystore.Close()
- return getActions(keystore).CreatePersistentBuffer(c)
+ err := getActions(keystore).CreatePersistentBuffer(c)
+ if err == nil {
+ // Slurm is looking for the string "created" to know this worked
+ fmt.Printf("created %s\n", c.String("token"))
+ }
+ return err
+}
+
+func printOutput(function func() (string, error)) error {
+	output, err := function()
+	if err != nil {
+		return err
+	}
+	fmt.Println(output)
+	return nil
+}
func showInstances(_ *cli.Context) error {
keystore := getKeystore()
defer keystore.Close()
- return getActions(keystore).ShowInstances()
+ return printOutput(getActions(keystore).ShowInstances)
}
func showSessions(_ *cli.Context) error {
keystore := getKeystore()
defer keystore.Close()
- return getActions(keystore).ShowSessions()
+ return printOutput(getActions(keystore).ShowSessions)
}
func listPools(_ *cli.Context) error {
keystore := getKeystore()
defer keystore.Close()
- return getActions(keystore).ListPools()
+ return printOutput(getActions(keystore).ListPools)
}
func showConfigurations(_ *cli.Context) error {
keystore := getKeystore()
defer keystore.Close()
- return getActions(keystore).ShowConfigurations()
+ return printOutput(getActions(keystore).ShowConfigurations)
}
func teardown(c *cli.Context) error {
@@ -84,7 +95,9 @@ func setup(c *cli.Context) error {
func realSize(c *cli.Context) error {
keystore := getKeystore()
defer keystore.Close()
- return getActions(keystore).RealSize(c)
+ return printOutput(func() (s string, e error) {
+ return getActions(keystore).RealSize(c)
+ })
}
func dataIn(c *cli.Context) error {
@@ -116,3 +129,11 @@ func dataOut(c *cli.Context) error {
defer keystore.Close()
return getActions(keystore).DataOut(c)
}
+
+func generateAnsible(c *cli.Context) error {
+ keystore := getKeystore()
+ defer keystore.Close()
+ return printOutput(func() (s string, e error) {
+ return getActions(keystore).GenerateAnsible(c)
+ })
+}
diff --git a/cmd/dacctl/main.go b/cmd/dacctl/main.go
index 39a92c15..186d0888 100644
--- a/cmd/dacctl/main.go
+++ b/cmd/dacctl/main.go
@@ -1,6 +1,8 @@
package main
import (
+ "fmt"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/config"
"github.com/RSE-Cambridge/data-acc/pkg/version"
"github.com/urfave/cli"
"log"
@@ -160,19 +162,21 @@ func runCli(args []string) error {
Usage: "Returns fake data to keep burst buffer plugin happy.",
Action: showConfigurations,
},
+ {
+ Name: "generate_ansible",
+			Usage:  "Creates debug ansible files for the given token.",
+ Action: generateAnsible,
+ Flags: []cli.Flag{token},
+ },
}
-
return app.Run(stripFunctionArg(args))
}
func main() {
- logFilename := os.Getenv("DACCTL_LOG")
- if logFilename == "" {
- logFilename = "/var/log/dacctl.log"
- }
+ logFilename := config.GetDacctlLog()
f, err := os.OpenFile(logFilename, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
if err != nil {
- log.Fatalf("error opening file: %v", err)
+		log.Fatalf("error opening log file (set DACCTL_LOG to configure an alternative): %v", err)
}
defer f.Close()
@@ -188,6 +192,7 @@ func main() {
log.Println("dacctl start, called with:", strings.Join(os.Args, " "))
if err := runCli(os.Args); err != nil {
+ fmt.Println(err)
log.Println("dacctl error, called with:", strings.Join(os.Args, " "))
log.Fatal(err)
} else {
diff --git a/cmd/dacctl/main_test.go b/cmd/dacctl/main_test.go
index 1edf6837..ae73ada7 100644
--- a/cmd/dacctl/main_test.go
+++ b/cmd/dacctl/main_test.go
@@ -4,9 +4,10 @@ import (
"context"
"errors"
"fmt"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/dacctl/actions"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/keystoreregistry"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/dacctl"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/store"
"github.com/stretchr/testify/assert"
+ "log"
"strings"
"testing"
)
@@ -108,6 +109,9 @@ func TestShow(t *testing.T) {
err = runCli([]string{"--function", "show_configurations"})
assert.Equal(t, "ShowConfigurations", err.Error())
+
+ err = runCli([]string{"--function", "generate_ansible", "--token", "foo"})
+ assert.Equal(t, "GenerateAnsible", err.Error())
}
func TestFlow(t *testing.T) {
@@ -143,83 +147,108 @@ func TestFlow(t *testing.T) {
type stubKeystore struct{}
func (*stubKeystore) Close() error {
+ log.Println("closed")
return nil
}
-func (*stubKeystore) CleanPrefix(prefix string) error {
- panic("implement me")
-}
-func (*stubKeystore) Add(keyValues []keystoreregistry.KeyValue) error {
+
+func (*stubKeystore) Create(key string, value []byte) (int64, error) {
panic("implement me")
}
-func (*stubKeystore) Update(keyValues []keystoreregistry.KeyValueVersion) error {
+
+func (*stubKeystore) Update(key string, value []byte, modRevision int64) (int64, error) {
panic("implement me")
}
-func (*stubKeystore) DeleteAll(keyValues []keystoreregistry.KeyValueVersion) error {
+
+func (*stubKeystore) Delete(key string, modRevision int64) error {
panic("implement me")
}
-func (*stubKeystore) GetAll(prefix string) ([]keystoreregistry.KeyValueVersion, error) {
+
+func (*stubKeystore) DeleteAllKeysWithPrefix(keyPrefix string) (int64, error) {
panic("implement me")
}
-func (*stubKeystore) Get(key string) (keystoreregistry.KeyValueVersion, error) {
+
+func (*stubKeystore) GetAll(keyPrefix string) ([]store.KeyValueVersion, error) {
panic("implement me")
}
-func (*stubKeystore) WatchPrefix(prefix string, onUpdate func(old *keystoreregistry.KeyValueVersion, new *keystoreregistry.KeyValueVersion)) {
+
+func (*stubKeystore) Get(key string) (store.KeyValueVersion, error) {
panic("implement me")
}
-func (*stubKeystore) WatchKey(ctxt context.Context, key string, onUpdate func(old *keystoreregistry.KeyValueVersion, new *keystoreregistry.KeyValueVersion)) {
+
+func (*stubKeystore) IsExist(key string) (bool, error) {
panic("implement me")
}
-func (*stubKeystore) KeepAliveKey(key string) error {
+
+func (*stubKeystore) Watch(ctxt context.Context, key string, withPrefix bool) store.KeyValueUpdateChan {
panic("implement me")
}
-func (*stubKeystore) NewMutex(lockKey string) (keystoreregistry.Mutex, error) {
+
+func (*stubKeystore) KeepAliveKey(ctxt context.Context, key string) error {
panic("implement me")
}
-func (*stubKeystore) Watch(ctxt context.Context, key string, withPrefix bool) keystoreregistry.KeyValueUpdateChan {
+
+func (*stubKeystore) NewMutex(lockKey string) (store.Mutex, error) {
panic("implement me")
}
type stubDacctlActions struct{}
-func (*stubDacctlActions) CreatePersistentBuffer(c actions.CliContext) error {
+func (*stubDacctlActions) CreatePersistentBuffer(c dacctl.CliContext) error {
return fmt.Errorf("CreatePersistentBuffer %s", c.String("token"))
}
-func (*stubDacctlActions) DeleteBuffer(c actions.CliContext) error {
+
+func (*stubDacctlActions) DeleteBuffer(c dacctl.CliContext) error {
return fmt.Errorf("DeleteBuffer %s", c.String("token"))
}
-func (*stubDacctlActions) CreatePerJobBuffer(c actions.CliContext) error {
+
+func (*stubDacctlActions) CreatePerJobBuffer(c dacctl.CliContext) error {
return errors.New("CreatePerJobBuffer")
}
-func (*stubDacctlActions) ShowInstances() error {
- return errors.New("ShowInstances")
+
+func (*stubDacctlActions) ShowInstances() (string, error) {
+ return "", errors.New("ShowInstances")
}
-func (*stubDacctlActions) ShowSessions() error {
- return errors.New("ShowSessions")
+
+func (*stubDacctlActions) ShowSessions() (string, error) {
+ return "", errors.New("ShowSessions")
}
-func (*stubDacctlActions) ListPools() error {
- return errors.New("ListPools")
+
+func (*stubDacctlActions) ListPools() (string, error) {
+ return "", errors.New("ListPools")
}
-func (*stubDacctlActions) ShowConfigurations() error {
- return errors.New("ShowConfigurations")
+
+func (*stubDacctlActions) ShowConfigurations() (string, error) {
+ return "", errors.New("ShowConfigurations")
}
-func (*stubDacctlActions) ValidateJob(c actions.CliContext) error {
+
+func (*stubDacctlActions) ValidateJob(c dacctl.CliContext) error {
return errors.New("ValidateJob")
}
-func (*stubDacctlActions) RealSize(c actions.CliContext) error {
- return errors.New("RealSize")
+
+func (*stubDacctlActions) RealSize(c dacctl.CliContext) (string, error) {
+ return "", errors.New("RealSize")
}
-func (*stubDacctlActions) DataIn(c actions.CliContext) error {
+
+func (*stubDacctlActions) DataIn(c dacctl.CliContext) error {
return errors.New("DataIn")
}
-func (*stubDacctlActions) Paths(c actions.CliContext) error {
+
+func (*stubDacctlActions) Paths(c dacctl.CliContext) error {
return errors.New("Paths")
}
-func (*stubDacctlActions) PreRun(c actions.CliContext) error {
+
+func (*stubDacctlActions) PreRun(c dacctl.CliContext) error {
return errors.New("PreRun")
}
-func (*stubDacctlActions) PostRun(c actions.CliContext) error {
+
+func (*stubDacctlActions) PostRun(c dacctl.CliContext) error {
return errors.New("PostRun")
}
-func (*stubDacctlActions) DataOut(c actions.CliContext) error {
+
+func (*stubDacctlActions) DataOut(c dacctl.CliContext) error {
return errors.New("DataOut")
}
+
+func (*stubDacctlActions) GenerateAnsible(c dacctl.CliContext) (string, error) {
+ return "", errors.New("GenerateAnsible")
+}
diff --git a/cmd/dacd/main.go b/cmd/dacd/main.go
index 3be34ac8..8a087c44 100644
--- a/cmd/dacd/main.go
+++ b/cmd/dacd/main.go
@@ -1,37 +1,36 @@
package main
import (
- "github.com/RSE-Cambridge/data-acc/internal/pkg/etcdregistry"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/keystoreregistry"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/lifecycle/brickmanager"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/dacd"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/dacd/brick_manager_impl"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/store_impl"
"log"
"os"
"os/signal"
"syscall"
)
-func waitForShutdown() {
+func waitForShutdown(manager dacd.BrickManager) {
c := make(chan os.Signal)
signal.Notify(c, os.Interrupt, syscall.SIGINT)
<-c
log.Println("I have been asked to shutdown, doing tidy up...")
+ manager.Shutdown()
os.Exit(1)
}
func main() {
log.Println("Starting data-accelerator's brick manager")
- keystore := etcdregistry.NewKeystore()
- defer keystore.Close()
- poolRegistry := keystoreregistry.NewPoolRegistry(keystore)
- volumeRegistry := keystoreregistry.NewVolumeRegistry(keystore)
+ keystore := store_impl.NewKeystore()
+ defer func() {
+ log.Println("keystore closed with error: ", keystore.Close())
+ }()
- manager := brickmanager.NewBrickManager(poolRegistry, volumeRegistry)
- if err := manager.Start(); err != nil {
- log.Fatal(err)
- }
+ manager := brick_manager_impl.NewBrickManager(keystore)
+ manager.Startup()
log.Println("Brick manager started for:", manager.Hostname())
- waitForShutdown()
+ waitForShutdown(manager)
}
diff --git a/dac-ansible/master.yml b/dac-ansible/master.yml
index d865e42c..e0482ddb 100644
--- a/dac-ansible/master.yml
+++ b/dac-ansible/master.yml
@@ -305,13 +305,6 @@
- docker_compose:
project_src: /var/lib/slurm-docker/slurm-master
register: output
- - name: ensure slurm cluster registered in db
- shell: |
- sleep 10 && docker exec slurmctld bash -c "/usr/bin/sacctmgr --immediate add cluster name=linux" && docker restart slurmdbd slurmctld
- register: shell_result
- changed_when: "shell_result.rc == 0"
- failed_when: "shell_result.rc != 0 and ('already exists' not in shell_result.stdout)"
- when: output.changed
- hosts: slurm_workers
become: true
diff --git a/dac-ansible/roles/data-acc/defaults/main.yml b/dac-ansible/roles/data-acc/defaults/main.yml
index 49640e67..c5591795 100644
--- a/dac-ansible/roles/data-acc/defaults/main.yml
+++ b/dac-ansible/roles/data-acc/defaults/main.yml
@@ -1,5 +1,6 @@
---
-data_acc_version: v1.3
+data_acc_version: 'v2.0-alpha.12'
+data_acc_checksum: 'sha256:3e2d42757c5a0a77d967fabac6b722e597077aeb8edf1fe6225c5203827829c5'
data_acc_platform: linux-amd64
data_acc_mirror: https://github.com/RSE-Cambridge/data-acc/releases/download
data_acc_install_dir: /usr/local/bin
@@ -11,7 +12,6 @@ data_acc_launch: True
data_acc_name: 'data-acc-{{data_acc_version}}'
data_acc_tgz: '{{data_acc_name}}.tgz'
data_acc_tgz_url: '{{ data_acc_mirror }}/{{ data_acc_version }}/data-acc-{{ data_acc_version }}.tgz'
-data_acc_checksum: 'sha256:0d6a8a5b9ccc30c7a7505efcf2858997c5c4487d57fd5ef45d075f25877c9485'
data_acc_user: dac
data_acc_group: dac
diff --git a/dac-ansible/roles/data-acc/templates/dacd.conf.j2 b/dac-ansible/roles/data-acc/templates/dacd.conf.j2
index 7c49370c..13bc311a 100644
--- a/dac-ansible/roles/data-acc/templates/dacd.conf.j2
+++ b/dac-ansible/roles/data-acc/templates/dacd.conf.j2
@@ -4,14 +4,18 @@ ETCDCTL_ENDPOINTS=https://{{ hostvars[groups['etcd_master'][0]].ansible_host }}:
ETCDCTL_CERT_FILE=/etc/data-acc/pki/{{ inventory_hostname }}.pem
ETCDCTL_KEY_FILE=/etc/data-acc/pki/{{ inventory_hostname }}-key.pem
ETCDCTL_CA_FILE=/etc/data-acc/pki/ca.pem
-DAC_ANSIBLE_DIR={{data_acc_install_dir}}/{{data_acc_name}}/fs-ansible/
+
+# dacd config settings
+DAC_POOL_NAME=default
+DAC_BRICK_CAPACITY_GB=1600
+DAC_BRICK_COUNT=5
+DAC_BRICK_ADDRESS_PATTERN="loop%d"
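+# pattern for brick device names (assumed: loop0..loop4 for the 5 fake loop devices)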
+
#DAC_SKIP_ANSIBLE=True
+DAC_ANSIBLE_DIR={{data_acc_install_dir}}/{{data_acc_name}}/fs-ansible/
DAC_HOST_GROUP=dac-fake
-DEVICE_COUNT=5
-DAC_POOL_NAME=default
-DAC_DEVICE_CAPACITY_GB=1600
DACCTL_LOG=/var/log/dacctl.log
#DAC_LNET_SUFFIX="-opa@o2ib1"
-DEVICE_TYPE="loop%d"
DAC_MDT_SIZE_MB="50"
DAC_MAX_MDT_COUNT=2
diff --git a/docker-slurm/Dockerfile b/docker-slurm/Dockerfile
index 6afecd50..186e21ca 100644
--- a/docker-slurm/Dockerfile
+++ b/docker-slurm/Dockerfile
@@ -8,7 +8,7 @@ LABEL org.label-schema.docker.cmd="docker-compose up -d" \
org.label-schema.description="Slurm Docker cluster on CentOS 7" \
maintainer="John Garbutt"
-ARG SLURM_TAG=slurm-19-05-1-2
+ARG SLURM_TAG=slurm-19-05-2-1
ARG GOSU_VERSION=1.11
RUN set -ex \
diff --git a/docker-slurm/docker-compose.yml b/docker-slurm/docker-compose.yml
index 47bb3641..93677162 100644
--- a/docker-slurm/docker-compose.yml
+++ b/docker-slurm/docker-compose.yml
@@ -111,7 +111,7 @@ services:
- "COMPUTE_NODE=c"
etcd1:
- image: quay.io/coreos/etcd:v3.3.13
+ image: quay.io/coreos/etcd:v3.3.15
command: etcd --name etcd1 --data-dir=/etcd-data --initial-cluster-state new --listen-client-urls http://0.0.0.0:2379 --advertise-client-urls http://0.0.0.0:2379 --listen-peer-urls http://0.0.0.0:2380 --initial-cluster 'etcd1=http://etcd1:2380'
hostname: etcd1
container_name: etcd1
diff --git a/fs-ansible/create.yml b/fs-ansible/create.yml
new file mode 100644
index 00000000..18c2e538
--- /dev/null
+++ b/fs-ansible/create.yml
@@ -0,0 +1,15 @@
+---
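+# Wipes and formats the bricks, then mounts the new filesystem on the clients.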
+- name: Create Lustre filesystem (format)
+ hosts: all
+ any_errors_fatal: true
+ become: yes
+ roles:
+ - role: lustre
+ vars:
+ lustre_state: "present"
+ lustre_format_disks: true
+
+ - role: lustre_client_mount
+ vars:
+ lustre_client_mount_present: true
\ No newline at end of file
diff --git a/fs-ansible/dac-beegfs.yml b/fs-ansible/dac-beegfs.yml
deleted file mode 100644
index 6709cf44..00000000
--- a/fs-ansible/dac-beegfs.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Setup beegfs for io500
- hosts: fs1001
- become: yes
- roles:
- - role: beegfs
- vars:
- fs_name: fs1001
diff --git a/fs-ansible/delete.yml b/fs-ansible/delete.yml
new file mode 100644
index 00000000..3a6233c4
--- /dev/null
+++ b/fs-ansible/delete.yml
@@ -0,0 +1,14 @@
+---
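+# Unmounts the clients, stops the Lustre targets, then wipes the brick partitions.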
+- name: Delete Lustre filesystem (unmount and wipe)
+ hosts: all
+ any_errors_fatal: true
+ become: yes
+ roles:
+ - role: lustre_client_mount
+ vars:
+ lustre_client_mount_present: false
+
+ - role: lustre
+ vars:
+ lustre_state: "absent"
\ No newline at end of file
diff --git a/fs-ansible/demo.sh b/fs-ansible/demo.sh
deleted file mode 100755
index 8755eef0..00000000
--- a/fs-ansible/demo.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/bin/bash
-
-. .venv/bin/activate
-
-set -eux
-
-echo Get FS1 up
-echo
-
-ansible-playbook test-dac.yml -i test-inventory --tag reformat_mgs
-
-ansible-playbook test-dac.yml -i test-inventory --tag format_mgs --tag reformat_mdts --tag reformat_osts
-ansible-playbook test-dac.yml -i test-inventory --tag start_mgs --tag start_mdts --tag start_osts --tag mount_fs
-
-echo Show FS works
-echo
-
-ls /mnt/lustre/fs1
-ssh dac2 'sudo bash -c "hostname > /mnt/lustre/fs1/demo"'
-cat /mnt/lustre/fs1/demo
-
-echo Get FS1 down
-echo
-
-ansible-playbook test-dac.yml -i test-inventory --tag umount_fs --tag stop_osts --tag stop_mdts
-ansible-playbook test-dac.yml -i test-inventory --tag reformat_mdts --tag reformat_osts
-
-cat /mnt/lustre/fs1/demo || true
-
-echo Clean MGS
-echo
-
-ansible-playbook test-dac.yml -i test-inventory --tag stop_mgs
-ansible-playbook test-dac.yml -i test-inventory --tag reformat_mgs
-
-echo Bring up an down FS2
-
-ansible-playbook test-dac2.yml -i test-inventory2 --tag format_mgs --tag reformat_mdts --tag reformat_osts
-ansible-playbook test-dac2.yml -i test-inventory2 --tag start_mgs --tag start_mdts --tag start_osts --tag mount_fs
-ansible-playbook test-dac2.yml -i test-inventory2 --tag start_mgs --tag start_mdts --tag start_osts --tag mount_fs
-
-ls /mnt/lustre/fs2
-ssh dac2 'sudo bash -c "hostname > /mnt/lustre/fs2/demo"'
-cat /mnt/lustre/fs2/demo
-
-ansible-playbook test-dac2.yml -i test-inventory2 --tag umount_fs --tag stop_osts --tag stop_mdts
-ansible-playbook test-dac2.yml -i test-inventory2 --tag umount_fs --tag stop_osts --tag stop_mdts
-ansible-playbook test-dac2.yml -i test-inventory2 --tag reformat_mdts --tag reformat_osts
-
-cat /mnt/lustre/fs2/demo || true
-
-echo Clean MGS
-echo
-
-ansible-playbook test-dac.yml -i test-inventory --tag stop_mgs
-ansible-playbook test-dac.yml -i test-inventory --tag reformat_mgs
diff --git a/fs-ansible/group_vars/dac-fake.yaml b/fs-ansible/group_vars/dac-fake.yaml
deleted file mode 100644
index 783bdfa1..00000000
--- a/fs-ansible/group_vars/dac-fake.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-beegfs_host_info:
- nvme0n1:
- if: eth0
- str_port: 8100
- mdt_port: 8200
- mgs_port: 8300
- numa: 0
- nvme1n1:
- if: eth0
- str_port: 8101
- mdt_port: 8201
- mgs_port: 8301
- numa: 0
- nvme2n1:
- if: eth0
- str_port: 8102
- mdt_port: 8202
- mgs_port: 8302
- numa: 0
- nvme3n1:
- if: eth0
- str_port: 8103
- mdt_port: 8203
- mgs_port: 8303
- numa: 0
- nvme4n1:
- if: eth0
- str_port: 8104
- mdt_port: 8204
- mgs_port: 8304
- numa: 0
diff --git a/fs-ansible/group_vars/dac-prod.yaml b/fs-ansible/group_vars/dac-prod.yaml
deleted file mode 100644
index 224f3ad4..00000000
--- a/fs-ansible/group_vars/dac-prod.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
----
-beegfs_host_info:
- nvme0n1:
- if: ib0
- str_port: 8100
- mdt_port: 8200
- mgs_port: 8300
- numa: 0
- nvme1n1:
- if: ib0
- str_port: 8101
- mdt_port: 8201
- mgs_port: 8301
- numa: 0
- nvme2n1:
- if: ib0
- str_port: 8102
- mdt_port: 8202
- mgs_port: 8302
- numa: 0
- nvme3n1:
- if: ib0
- str_port: 8103
- mdt_port: 8203
- mgs_port: 8303
- numa: 0
- nvme4n1:
- if: ib0
- str_port: 8104
- mdt_port: 8204
- mgs_port: 8304
- numa: 0
- nvme5n1:
- if: ib0
- str_port: 8105
- mdt_port: 8205
- mgs_port: 8305
- numa: 0
- nvme6n1:
- if: ib1
- str_port: 8106
- mdt_port: 8206
- mgs_port: 8306
- numa: 1
- nvme7n1:
- if: ib1
- str_port: 8107
- mdt_port: 8207
- mgs_port: 8307
- numa: 1
- nvme8n1:
- if: ib1
- str_port: 8108
- mdt_port: 8208
- mgs_port: 8308
- numa: 1
- nvme9n1:
- if: ib1
- str_port: 8109
- mdt_port: 8209
- mgs_port: 8309
- numa: 1
- nvme10n1:
- if: ib1
- str_port: 8110
- mdt_port: 8210
- mgs_port: 8310
- numa: 1
- nvme11n1:
- if: ib1
- str_port: 8111
- mdt_port: 8211
- mgs_port: 8311
- numa: 1
diff --git a/fs-ansible/inv_gen.py b/fs-ansible/inv_gen.py
deleted file mode 100755
index ed4bd87c..00000000
--- a/fs-ansible/inv_gen.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python2
-import yaml
-from collections import OrderedDict
-
-def main():
-
- dac_no = 2
- #init yaml structure
- dac = {'dac-prod':{'children': {'fs1001':{'hosts':{}}}}}
-
- #create nvme,index as orderd dict to preserve lustre global index order in relation to nvme
- for i in range(1,dac_no+1):
- dac['dac-prod']['children']['fs1001']['hosts']["dac-e-{}".format(i)] = {'fs1001_osts': OrderedDict()}
-
- #set dac 1 as mdt and mgs
- dac['dac-prod']['children']['fs1001']['hosts']['dac-e-1']['fs1001_mgs'] = "nvme0n1"
- dac['dac-prod']['children']['fs1001']['hosts']['dac-e-1']['fs1001_mdt'] = "nvme0n1"
-
- #create keys for nvmes and index init to 0
- for i in range(1,12):
- dac['dac-prod']['children']['fs1001']['hosts']['dac-e-1']['fs1001_osts']["nvme{}n1".format(i)] = 0
-
- for i in range(2,dac_no+1):
- for j in range(0,12):
- dac['dac-prod']['children']['fs1001']['hosts']["dac-e-{}".format(i)]['fs1001_osts']["nvme{}n1".format(j)] = 0
-
- #globaly index all nvmes
- n = 1
- for i in range(1,dac_no+1):
- for key in dac['dac-prod']['children']['fs1001']['hosts']["dac-e-{}".format(i)]['fs1001_osts']:
- dac['dac-prod']['children']['fs1001']['hosts']["dac-e-{}".format(i)]['fs1001_osts'][key] = n
- n+=1
-
- #cast orderdict back to dict for yaml
- for i in range(1,dac_no+1):
- dac['dac-prod']['children']['fs1001']['hosts']["dac-e-{}".format(i)]['fs1001_osts'] = dict(dac['dac-prod']['children']['fs1001']['hosts']["dac-e-{}".format(i)]['fs1001_osts'])
-
- dac['dac-prod']['children']['fs1001']['vars'] = {
- 'fs1001_mgsnode': 'dac-e-1',
- 'fs1001_client_port': '10001'
- }
-
- print(yaml.dump(dac))
-
-
-if __name__ == "__main__":
- main()
diff --git a/fs-ansible/inventory b/fs-ansible/inventory
deleted file mode 100644
index bf1447b6..00000000
--- a/fs-ansible/inventory
+++ /dev/null
@@ -1,13 +0,0 @@
-dac-prod:
- children:
- fs1001:
- hosts:
- dac-e-1:
- fs1001_mgs: nvme0n1
- fs1001_mdt: nvme1n1
- fs1001_osts: {nvme0n1: 2}
- dac2:
- fs1001_osts: {nvme3n1: 1}
- vars:
- fs1001_mgsnode: dac-e-1
- fs1001_client_port: 1001
diff --git a/fs-ansible/restore.yml b/fs-ansible/restore.yml
new file mode 100644
index 00000000..49b3dd3e
--- /dev/null
+++ b/fs-ansible/restore.yml
@@ -0,0 +1,15 @@
+---
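+# Remounts an existing filesystem without reformatting, so data on the bricks is preserved.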
+- name: Restore Lustre filesystem
+ hosts: all
+ any_errors_fatal: true
+ become: yes
+ roles:
+ - role: lustre
+ vars:
+ lustre_state: "present"
+ lustre_format_disks: false
+
+ - role: lustre_client_mount
+ vars:
+ lustre_client_mount_present: true
\ No newline at end of file
diff --git a/fs-ansible/roles/lustre-multirail/tasks/main.yaml b/fs-ansible/roles/lustre-multirail/tasks/main.yaml
deleted file mode 100644
index cc971e39..00000000
--- a/fs-ansible/roles/lustre-multirail/tasks/main.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-
-- name: flush arp ib
- command: ip neigh flush dev {{ item }}
- with_items:
- - ib0
- - ib1
-
-- name: update routes2
- blockinfile:
- path: /etc/iproute2/rt_tables
- block: |
- 200 ib0
- 201 ib1
-
-- name: static routes {{ item.name }}
- command: ip route add 10.47.0.0/16 dev {{ item.name }} proto kernel scope link src {{ item.addr }} table {{ item.name }}
- with_items: "{{ ip }}"
- when:
- - ip is defined
- - item.name is match(target|default(".*"))
- tags: [ 'never' , 'init_routes' ]
-
-- name: ib static routes
- command: ip rule add from {{ item.addr }} table {{ item.name }}
- with_items: "{{ ip }}"
- when:
- - ip is defined
- - item.name is match(target|default(".*"))
- tags: [ 'never' , 'init_routes' ]
-
-- name: ib route flush
- command: ip route flush cache
diff --git a/fs-ansible/roles/lustre-multirail/tasks/main.yaml~ b/fs-ansible/roles/lustre-multirail/tasks/main.yaml~
deleted file mode 100644
index e69de29b..00000000
diff --git a/fs-ansible/roles/lustre/defaults/main.yaml b/fs-ansible/roles/lustre/defaults/main.yaml
new file mode 100644
index 00000000..14a5581e
--- /dev/null
+++ b/fs-ansible/roles/lustre/defaults/main.yaml
@@ -0,0 +1,11 @@
+---
+# state is either present or absent
+lustre_state: "present"
+lustre_format_disks: true
+lustre_stop_mgs: false
+
+mdt_size_mb: "50"
+
+# following are usually overwritten by host vars
+mdts: {}
+osts: {}
\ No newline at end of file
diff --git a/fs-ansible/roles/lustre/tasks/format.yaml b/fs-ansible/roles/lustre/tasks/format.yaml
index 2e004b94..483f7c08 100644
--- a/fs-ansible/roles/lustre/tasks/format.yaml
+++ b/fs-ansible/roles/lustre/tasks/format.yaml
@@ -1,13 +1,4 @@
---
-- set_fact:
- mgs: "{{ vars[fs_name + '_mgs'] | default(omit) }}"
- mgsnode: "{{ vars[fs_name + '_mgsnode']}}"
- mdts: "{{ vars[fs_name + '_mdts'] | default({}) }}"
- osts: "{{ vars[fs_name + '_osts'] | default({}) }}"
- client_port: "{{ vars[fs_name + '_client_port'] }}"
- mdt_size: "{{ vars[fs_name + '_mdt_size'] | default('10%') }}"
- tags: [ 'never', 'format_mgs', 'reformat_mgs', 'format', 'clean']
-
- name: Ensure MGS has been formatted
command: /usr/sbin/mkfs.lustre --mgs /dev/{{ mgs }}
register: command_result
@@ -15,109 +6,57 @@
changed_when: "command_result.rc == 0"
when:
- mgs is defined
- tags: [ 'never', 'format_mgs', 'format']
-
-- name: Reformat MGS
- command: /usr/sbin/mkfs.lustre --mgs --reformat /dev/{{ mgs }}
- when:
- - mgs is defined
- tags: [ 'never', 'reformat_mgs']
-
-
-- name: Clean all previous LV and VG
- block:
-
- - name: Remove LV for MDTs
- lvol:
- lv: "mdt"
- vg: "dac-{{ item }}"
- state: absent
- force: yes
- loop: "{{ mdts.keys() | list }}"
- when:
- - mdts is defined
-
- - name: Remove LV for OST
- lvol:
- lv: "ost"
- vg: "dac-{{ item }}"
- state: absent
- force: yes
- loop: "{{ osts.keys() | list }}"
- when:
- - osts is defined
-
- - name: Remove VG for MTDs
- lvg:
- vg: "dac-{{ item }}"
- pvs: "/dev/{{ item }}"
- force: yes
- state: absent
- loop: "{{ mdts.keys() | list }}"
- when:
- - mdts is defined
-
- - name: Remove VG for OSTs
- lvg:
- vg: "dac-{{ item }}"
- pvs: "/dev/{{ item }}"
- force: yes
- state: absent
- loop: "{{ osts.keys() | list }}"
- when:
- - osts is defined
-
- tags: [ 'never', 'reformat_mdts', 'reformat_osts', 'format', 'clean']
-
-- name: Ensure MDTs setup
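+# Each device is split in two: partition 1 is a small MDT (mdt_size_mb), partition 2 an OST using the rest.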
+- name: Partition disks
block:
-
- - name: Add VG for MDT
- lvg:
- vg: "dac-{{ item }}"
- pvs: "/dev/{{ item }}"
+ - name: Add MDT Partition
+ parted:
+ device: "/dev/{{ item }}"
+ number: 1
+ part_start: "0%"
+ part_end: "{{ mdt_size_mb }}MB"
+ label: gpt
+ state: present
+    loop: "{{ ((mdts.keys() | list) + (osts.keys() | list)) | unique }}"
+ - name: Add OST Partition
+ parted:
+ device: "/dev/{{ item }}"
+ number: 2
+ part_start: "{{ mdt_size_mb }}MB"
+ part_end: "100%"
+ label: gpt
state: present
- loop: "{{ mdts.keys() | list }}"
+    loop: "{{ ((mdts.keys() | list) + (osts.keys() | list)) | unique }}"
- - name: Add LV for MDT
- lvol:
- lv: "mdt"
- vg: "dac-{{ item }}"
- size: "{{ mdt_size }}"
+- name: Format MDTs
+ block:
+ - name: Add MDT Partition
+ parted:
+ device: "/dev/{{ item }}"
+ number: 1
+ part_start: "0%"
+ part_end: "{{ mdt_size_mb }}MB"
+ label: gpt
state: present
- loop: "{{ mdts.keys() | list }}"
+    loop: "{{ mdts.keys() | list }}"
- name: Reformat MDTs
- command: "/usr/sbin/mkfs.lustre --mdt --reformat --fsname={{ fs_name }} --index={{ item.value }} --mgsnode={{ mgsnode }}{{ lnet_suffix }} --mkfsoptions=\"-O large_dir\" /dev/mapper/dac--{{ item.key }}-mdt"
+ command: "/usr/sbin/mkfs.lustre --mdt --reformat --fsname={{ fs_name }} --index={{ item.value }} --mgsnode={{ mgsnode }}{{ lnet_suffix }} /dev/{{ item.key }}p1"
loop: "{{ mdts|dict2items }}"
- when:
- - mdts is defined
- tags: [ 'never', 'reformat_mdts', 'format']
-
-
-- name: Ensure OSTs setup
+- name: Format OSTs
block:
- - name: Add VG for OSTs
- lvg:
- vg: "dac-{{ item }}"
- pvs: "/dev/{{ item }}"
- state: present
- loop: "{{ osts.keys() | list }}"
-
- - name: Add LV for OSTs
- lvol:
- lv: "ost"
- vg: "dac-{{ item }}"
- size: "+100%FREE"
- state: present
- loop: "{{ osts.keys() | list }}"
+ - name: Add OST Partition
+ parted:
+ device: "/dev/{{ item }}"
+ number: 2
+ part_start: "{{ mdt_size_mb }}MB"
+ part_end: "100%"
+ label: gpt
+ state: present
+      loop: "{{ osts.keys() | list }}"
- name: Reformat OSTs
- command: "/usr/sbin/mkfs.lustre --ost --reformat --fsname={{ fs_name }} --index={{ item.value }} --mgsnode={{ mgsnode }}{{ lnet_suffix }} /dev/mapper/dac--{{ item.key }}-ost"
+ command: "/usr/sbin/mkfs.lustre --ost --reformat --fsname={{ fs_name }} --index={{ item.value }} --mgsnode={{ mgsnode }}{{ lnet_suffix }} /dev/{{ item.key }}p2"
loop: "{{ osts|dict2items }}"
-
- when:
- - osts is defined
- tags: [ 'never', 'reformat_osts', 'format']
diff --git a/fs-ansible/roles/lustre/tasks/lnet.yaml b/fs-ansible/roles/lustre/tasks/lnet.yaml
deleted file mode 100644
index 67efb688..00000000
--- a/fs-ansible/roles/lustre/tasks/lnet.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: set lnet module options
- lineinfile:
- path: /etc/modprobe.d/lnet.conf
- regexp: '^options'
- line: 'options lnet networks=o2ib1(ib0)'
- tags: ['never', 'start_lnet']
diff --git a/fs-ansible/roles/lustre/tasks/main.yaml b/fs-ansible/roles/lustre/tasks/main.yaml
index 7f451c85..193bee27 100644
--- a/fs-ansible/roles/lustre/tasks/main.yaml
+++ b/fs-ansible/roles/lustre/tasks/main.yaml
@@ -1,5 +1,15 @@
---
-- include: repo.yaml
-- include: lnet.yaml
-- include: format.yaml
-- include: mount.yaml
+- import_tasks: wipe.yaml
+ when: lustre_format_disks|bool and lustre_state == "present"
+
+- import_tasks: format.yaml
+ when: lustre_format_disks|bool and lustre_state == "present"
+
+- import_tasks: mount.yaml
+ when: lustre_state == "present"
+
+- import_tasks: unmount.yaml
+ when: lustre_state == "absent"
+
+- import_tasks: wipe.yaml
+ when: lustre_state == "absent"
diff --git a/fs-ansible/roles/lustre/tasks/mount.yaml b/fs-ansible/roles/lustre/tasks/mount.yaml
index 1e54f949..5030e221 100644
--- a/fs-ansible/roles/lustre/tasks/mount.yaml
+++ b/fs-ansible/roles/lustre/tasks/mount.yaml
@@ -1,21 +1,16 @@
---
- set_fact:
- mgs: "{{ vars[fs_name + '_mgs'] | default(omit) }}"
- mgsnode: "{{ vars[fs_name + '_mgsnode'] }}"
- mdts: "{{ vars[fs_name + '_mdts'] | default({}) }}"
- osts: "{{ vars[fs_name + '_osts'] | default({}) }}"
- client_port: "{{ vars[fs_name + '_client_port'] }}"
+ mdts: "{{ mdts | default({}) }}"
+ osts: "{{ osts | default({}) }}"
tags: [ 'never', 'start_mgs', 'start_mdts', 'start_osts', 'create_mgs', 'create_mdt', 'create_osts', 'stop_all', 'client_mount','client_unmount', 'stop_mgs']
- name: load lustre module
command: modprobe -v lustre
tags: [ 'never', 'start_lustre', 'mount']
-
# Using ordering specified in here:
# http://wiki.lustre.org/Starting_and_Stopping_Lustre_Services
-
- name: Start MGS
block:
- name: Create MGS mount dir
@@ -25,13 +20,12 @@
recurse: yes
- name: mount MGSs
- command: mount -t lustre /dev/{{ mgs }} /lustre/MGS
+ command: mount -t lustre /dev/{{ mgs | default("mgs") }} /lustre/MGS
register: command_result
failed_when: "command_result.rc != 0 and ('is already mounted' not in command_result.stderr)"
changed_when: "command_result.rc == 0"
when:
- mgs is defined
- tags: [ 'never', 'start_mgs', 'create_mgs']
- name: Start MDTs
block:
@@ -43,27 +37,11 @@
with_items: "{{ mdts.keys() | list }}"
- name: mount MDTs
- command: mount -t lustre /dev/mapper/dac--{{ item }}-mdt /lustre/{{ fs_name }}/MDT/{{ item }}
+ command: mount -t lustre /dev/{{ item }}p1 /lustre/{{ fs_name }}/MDT/{{ item }}
register: command_result
failed_when: "command_result.rc != 0 and ('is already mounted' not in command_result.stderr)"
changed_when: "command_result.rc == 0"
with_items: "{{ mdts.keys() | list }}"
- when:
- - mdts is defined
- tags: [ 'never', 'start_mdts', 'create_mdt']
-
-- name: DNE2
- block:
- - name: Enable DNE2 remote dir
- command: "lctl set_param mdt.*.enable_remote_dir=1"
- register: command_result
- failed_when: "command_result.rc != 0 and ('lctl' not in command_result.stderr)"
-
- - name: Enable DNE2 all users
- command: "lctl set_param mdt.*.enable_remote_dir_gid=-1"
- when:
- - mgs is defined
- tags: [ 'never', 'start_mgs', 'create_mgs']
- name: Start OSTs
block:
@@ -75,102 +53,8 @@
with_items: "{{ osts.keys() | list }}"
- name: mount OSTs
- command: mount -t lustre /dev/mapper/dac--{{ item }}-ost /lustre/{{ fs_name }}/OST/{{ item }}
- register: command_result
- failed_when: "command_result.rc != 0 and ('is already mounted' not in command_result.stderr)"
- changed_when: "command_result.rc == 0"
- with_items: "{{ osts.keys() | list }}"
-
- when:
- - osts is defined
- tags: [ 'never', 'start_osts', 'create_osts']
-
-
-- name: mount lustre FS
- block:
- - name: ensure mount dir exists
- file:
- path: "/mnt/lustre/{{ fs_name }}"
- state: directory
- recurse: yes
- - name: mount lustre fs
- command: "mount -t lustre {{ mgsnode }}{{ lnet_suffix }}:/{{ fs_name }} /mnt/lustre/{{ fs_name }}"
+ command: mount -t lustre /dev/{{ item }}p2 /lustre/{{ fs_name }}/OST/{{ item }}
register: command_result
failed_when: "command_result.rc != 0 and ('is already mounted' not in command_result.stderr)"
changed_when: "command_result.rc == 0"
- tags: [ 'never', 'mount_fs', 'client_mount']
-
-
-- name: umount lustre FS
- block:
- - name: umount lustre fs
- command: "umount -l /mnt/lustre/{{ fs_name }}"
- register: command_result
- failed_when: "command_result.rc != 0 and ('not mounted' not in command_result.stderr) and ('mountpoint not found' not in command_result.stderr)"
- changed_when: "command_result.rc == 0"
-
- - name: ensure mount dir deleted
- file:
- path: "/mnt/lustre/{{ fs_name }}"
- state: absent
- tags: [ 'never', 'umount_fs', 'client_unmount']
-
-
-- name: Stop MDTs
- block:
- - name: umount mdts
- command: umount /lustre/{{ fs_name }}/MDT/{{ item }}
- register: command_result
- failed_when: "command_result.rc != 0 and ('not mounted' not in command_result.stderr) and ('mountpoint not found' not in command_result.stderr)"
- changed_when: "command_result.rc == 0"
- with_items: "{{ mdts.keys() | list }}"
-
- - name: remove mdt mount dir
- file:
- path: /lustre/{{ fs_name }}/MDT
- state: absent
- when:
- - mdts is defined
- tags: [ 'never', 'stop_mdts', 'stop_all']
-
-
-- name: Stop OST
- block:
- - name: umount OSTs
- command: umount /lustre/{{ fs_name }}/OST/{{ item }}
- register: command_result
- failed_when: "command_result.rc != 0 and ('not mounted' not in command_result.stderr) and ('mountpoint not found' not in command_result.stderr)"
- changed_when: "command_result.rc == 0"
- with_items: "{{ osts.keys() | list }}"
-
- - name: Remove OST mount dir
- file:
- path: /lustre/{{ fs_name }}/OST
- state: absent
- when:
- - osts is defined
- tags: [ 'never', 'stop_osts', 'stop_all']
-
-
-- name: Stop MGS
- block:
- - name: umount MGS
- command: umount /lustre/MGS
- register: command_result
- failed_when: "command_result.rc != 0 and ('not mounted' not in command_result.stderr) and ('mountpoint not found' not in command_result.stderr)"
- changed_when: "command_result.rc == 0"
-
- - name: Remove MGSDT mount dir
- file:
- path: /lustre/MGS
- state: absent
- when:
- - mgs is defined
- tags: [ 'never', 'stop_mgs']
-
-
-- name: umount all lustre
- command: umount -a -l -t lustre
- when:
- - osts is defined
- tags: [ 'never', 'stop_alllustre']
+ with_items: "{{ osts.keys() | list }}"
\ No newline at end of file
diff --git a/fs-ansible/roles/lustre/tasks/repo.yaml b/fs-ansible/roles/lustre/tasks/repo.yaml
deleted file mode 100644
index b781aa8c..00000000
--- a/fs-ansible/roles/lustre/tasks/repo.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: enable lustre server repo
- yum_repository:
- name: lustre-server
- description: lustre-server
- file: lustre-repo
- baseurl: https://downloads.whamcloud.com/public/lustre/{{ lustre.release }}/el7/{{ lustre.servertype }}
- gpgcheck: no
-
-- name: enable lustre client repo
- yum_repository:
- name: lustre-client
- description: lustre-client
- file: lustre-repo
- baseurl: https://downloads.whamcloud.com/public/lustre/{{ lustre.release }}/el7/client
- gpgcheck: no
-
-- name: enable lustre e2fs repo
- yum_repository:
- name: e2fsprogs-wc
- description: e2fsprogs-wc
- file: lustre-repo
- baseurl: https://downloads.whamcloud.com/public/e2fsprogs/latest/el7
- gpgcheck: no
diff --git a/fs-ansible/roles/lustre/tasks/unmount.yaml b/fs-ansible/roles/lustre/tasks/unmount.yaml
new file mode 100644
index 00000000..9ccdf1f6
--- /dev/null
+++ b/fs-ansible/roles/lustre/tasks/unmount.yaml
@@ -0,0 +1,51 @@
+---
+# Using ordering specified in here:
+# http://wiki.lustre.org/Starting_and_Stopping_Lustre_Services
+
+- name: Stop MDTs
+ block:
+ - name: umount mdts
+ command: umount /lustre/{{ fs_name }}/MDT/{{ item }}
+ register: command_result
+ failed_when: "command_result.rc != 0 and ('not mounted' not in command_result.stderr) and ('mountpoint not found' not in command_result.stderr)"
+ changed_when: "command_result.rc == 0"
+ with_items: "{{ mdts.keys() | list }}"
+
+ - name: remove mdt mount dir
+ file:
+ path: /lustre/{{ fs_name }}/MDT
+ state: absent
+
+- name: Stop OST
+ block:
+ - name: umount OSTs
+ command: umount /lustre/{{ fs_name }}/OST/{{ item }}
+ register: command_result
+ failed_when: "command_result.rc != 0 and ('not mounted' not in command_result.stderr) and ('mountpoint not found' not in command_result.stderr)"
+ changed_when: "command_result.rc == 0"
+ with_items: "{{ osts.keys() | list }}"
+
+ - name: Remove OST mount dir
+ file:
+ path: /lustre/{{ fs_name }}/OST
+ state: absent
+
+- name: Remove filesystem mount dir
+ file:
+ path: /lustre/{{ fs_name }}
+ state: absent
+
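+# The MGS may be shared with other filesystems, so it is only stopped when lustre_stop_mgs is set.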
+- name: Stop MGS
+ block:
+ - name: umount MGS
+ command: umount /lustre/MGS
+ register: command_result
+ failed_when: "command_result.rc != 0 and ('not mounted' not in command_result.stderr) and ('mountpoint not found' not in command_result.stderr)"
+ changed_when: "command_result.rc == 0"
+
+  - name: Remove MGS mount dir
+ file:
+ path: /lustre/MGS
+ state: absent
+ when: mgs is defined and lustre_stop_mgs
\ No newline at end of file
diff --git a/fs-ansible/roles/lustre/tasks/wipe.yaml b/fs-ansible/roles/lustre/tasks/wipe.yaml
new file mode 100644
index 00000000..e36327f7
--- /dev/null
+++ b/fs-ansible/roles/lustre/tasks/wipe.yaml
@@ -0,0 +1,21 @@
+---
+- set_fact:
+ mdts: "{{ mdts | default({}) }}"
+ osts: "{{ osts | default({}) }}"
+
+# TODO: maybe call wipefs or zero out the block device headers
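+# Removes the MDT (p1) and OST (p2) partitions that format.yaml creates.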
+
+- name: Remove old MDT Partition
+ parted:
+ device: "/dev/{{ item }}"
+ number: 1
+ state: absent
+  loop: "{{ ((mdts.keys() | list) + (osts.keys() | list)) | unique }}"
+
+- name: Remove old OST Partition
+ parted:
+ device: "/dev/{{ item }}"
+ number: 2
+ state: absent
+  loop: "{{ ((mdts.keys() | list) + (osts.keys() | list)) | unique }}"
\ No newline at end of file
diff --git a/fs-ansible/roles/lustre/vars/main.yaml b/fs-ansible/roles/lustre/vars/main.yaml
deleted file mode 100644
index ea3846a1..00000000
--- a/fs-ansible/roles/lustre/vars/main.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
- lustre:
- release: lustre-2.10.4
- servertype: patchless-ldiskfs-server
diff --git a/fs-ansible/roles/lustre_client_mount/defaults/main.yaml b/fs-ansible/roles/lustre_client_mount/defaults/main.yaml
new file mode 100644
index 00000000..5638ea37
--- /dev/null
+++ b/fs-ansible/roles/lustre_client_mount/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
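+# when false, the role unmounts the filesystem and removes the client mount dir instead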
+lustre_client_mount_present: true
\ No newline at end of file
diff --git a/fs-ansible/roles/lustre_client_mount/tasks/main.yaml b/fs-ansible/roles/lustre_client_mount/tasks/main.yaml
new file mode 100644
index 00000000..25eda07b
--- /dev/null
+++ b/fs-ansible/roles/lustre_client_mount/tasks/main.yaml
@@ -0,0 +1,28 @@
+---
+- name: mount lustre FS
+ block:
+ - name: ensure mount dir exists
+ file:
+ path: "/mnt/lustre/{{ fs_name }}"
+ state: directory
+ recurse: yes
+ - name: mount lustre fs
+ command: "mount -t lustre {{ mgsnode }}{{ lnet_suffix }}:/{{ fs_name }} /mnt/lustre/{{ fs_name }}"
+ register: command_result
+ failed_when: "command_result.rc != 0 and ('is already mounted' not in command_result.stderr)"
+ changed_when: "command_result.rc == 0"
+ when: "lustre_client_mount_present|bool"
+
+- name: umount lustre FS
+ block:
+ - name: umount lustre fs
+ command: "umount -l /mnt/lustre/{{ fs_name }}"
+ register: command_result
+ failed_when: "command_result.rc != 0 and ('not mounted' not in command_result.stderr) and ('mountpoint not found' not in command_result.stderr)"
+ changed_when: "command_result.rc == 0"
+
+ - name: ensure mount dir deleted
+ file:
+ path: "/mnt/lustre/{{ fs_name }}"
+ state: absent
+ when: "not (lustre_client_mount_present|bool)"
\ No newline at end of file
diff --git a/fs-ansible/test-dac-beegfs.yml b/fs-ansible/test-dac-beegfs.yml
deleted file mode 100644
index a21c2138..00000000
--- a/fs-ansible/test-dac-beegfs.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: Setup beegfs for fs1
- hosts: fs1
- become: yes
- roles:
- - role: beegfs
- vars:
- fs_name: fs1
-
-- name: Setup beegfs for fs2
- hosts: fs2
- become: yes
- roles:
- - role: beegfs
- vars:
- fs_name: fs2
diff --git a/fs-ansible/test-dac-lustre.yml b/fs-ansible/test-dac-lustre.yml
deleted file mode 100644
index 02531021..00000000
--- a/fs-ansible/test-dac-lustre.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: Setup beegfs for fs1
- hosts: fs1
- become: yes
- roles:
- - role: lustre
- vars:
- fs_name: fs1
-
-- name: Setup beegfs for fs2
- hosts: fs2
- become: yes
- roles:
- - role: lustre
- vars:
- fs_name: fs2
diff --git a/fs-ansible/test-dac.yml b/fs-ansible/test-dac.yml
deleted file mode 100644
index ccef00c9..00000000
--- a/fs-ansible/test-dac.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Install Lustre for fs1
- hosts: fs1
- become: yes
- gather_facts: no
- roles:
- - role: lustre
diff --git a/fs-ansible/test-inventory-beegfs b/fs-ansible/test-inventory-beegfs
deleted file mode 100644
index 98756e62..00000000
--- a/fs-ansible/test-inventory-beegfs
+++ /dev/null
@@ -1,24 +0,0 @@
-dac-fake:
- children:
- fs1:
- hosts:
- dac1:
- fs1_mgs: nvme0n1
- fs1_mdt: nvme0n1
- fs1_osts: {nvme0n1: 2}
- dac2:
- fs1_osts: {nvme3n1: 1}
- vars:
- fs1_mgsnode: dac1
- fs1_client_port: 1001
- fs2:
- hosts:
- dac1:
- fs2_mgs: nvme3n1
- fs2_mdt: nvme3n1
- fs2_osts: {nvme4n1: 2}
- dac2:
- fs2_osts: {nvme2n1: 1}
- vars:
- fs2_mgsnode: dac1
- fs2_client_port: 1002
diff --git a/fs-ansible/test-inventory-lustre b/fs-ansible/test-inventory-lustre
deleted file mode 100644
index 5429ef8a..00000000
--- a/fs-ansible/test-inventory-lustre
+++ /dev/null
@@ -1,26 +0,0 @@
-dac-fake:
- children:
- fs1:
- hosts:
- dac1:
- fs1_mgs: nvme0n1
- fs1_mdt: nvme1n1
- fs1_osts: {nvme2n1: 2}
- dac2:
- fs1_osts: {nvme3n1: 1}
- vars:
- fs1_mgsnode: dac1
- fs1_client_port: 1001
- lnet_suffix: ""
- fs2:
- hosts:
- dac1:
- fs2_mgs: nvme0n1
- fs2_mdt: nvme3n1
- fs2_osts: {nvme4n1: 2}
- dac2:
- fs2_osts: {nvme2n1: 1}
- vars:
- fs2_mgsnode: dac1
- fs2_client_port: 1002
- lnet_suffix: ""
diff --git a/go.mod b/go.mod
index b0e91f41..db83b47f 100644
--- a/go.mod
+++ b/go.mod
@@ -3,37 +3,62 @@ module github.com/RSE-Cambridge/data-acc
go 1.12
require (
+ cloud.google.com/go v0.44.3 // indirect
+ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
+ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 // indirect
github.com/coreos/bbolt v1.3.3 // indirect
- github.com/coreos/etcd v3.3.13+incompatible
+ github.com/coreos/etcd v3.3.15+incompatible
github.com/coreos/go-semver v0.3.0 // indirect
- github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a // indirect
+ github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
+ github.com/go-kit/kit v0.9.0 // indirect
+ github.com/go-stack/stack v1.8.0 // indirect
github.com/gogo/protobuf v1.2.1 // indirect
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect
github.com/golang/mock v1.3.1
- github.com/google/btree v1.0.0 // indirect
- github.com/gorilla/websocket v1.4.0 // indirect
+ github.com/google/go-cmp v0.3.1 // indirect
+ github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70 // indirect
+ github.com/google/uuid v1.1.1
+ github.com/gorilla/websocket v1.4.1 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
- github.com/grpc-ecosystem/grpc-gateway v1.9.4 // indirect
+ github.com/grpc-ecosystem/grpc-gateway v1.10.0 // indirect
+ github.com/hashicorp/golang-lru v0.5.3 // indirect
github.com/jonboulle/clockwork v0.1.0 // indirect
- github.com/prometheus/client_golang v1.0.0 // indirect
+ github.com/kisielk/errcheck v1.2.0 // indirect
+ github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
+ github.com/kr/pty v1.1.8 // indirect
+ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
+ github.com/pkg/errors v0.8.1 // indirect
+ github.com/prometheus/client_golang v1.1.0 // indirect
+ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 // indirect
+ github.com/prometheus/common v0.6.0
+ github.com/prometheus/procfs v0.0.4 // indirect
+ github.com/rogpeppe/fastuuid v1.2.0 // indirect
+ github.com/rogpeppe/go-internal v1.3.1 // indirect
+ github.com/sirupsen/logrus v1.4.2 // indirect
github.com/soheilhy/cmux v0.1.4 // indirect
- github.com/stretchr/testify v1.3.0
+ github.com/stretchr/objx v0.2.0 // indirect
+ github.com/stretchr/testify v1.4.0
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect
- github.com/urfave/cli v1.20.0
+ github.com/urfave/cli v1.21.0
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
go.etcd.io/bbolt v1.3.3 // indirect
go.uber.org/atomic v1.4.0 // indirect
go.uber.org/multierr v1.1.0 // indirect
go.uber.org/zap v1.10.0 // indirect
- golang.org/x/net v0.0.0-20190628185345-da137c7871d7 // indirect
- golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb // indirect
- golang.org/x/text v0.3.2 // indirect
- golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
- google.golang.org/appengine v1.4.0 // indirect
- google.golang.org/genproto v0.0.0-20190708153700-3bdd9d9f5532 // indirect
- google.golang.org/grpc v1.22.0 // indirect
+ golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472 // indirect
+ golang.org/x/exp v0.0.0-20190829153037-c13cbed26979 // indirect
+ golang.org/x/image v0.0.0-20190829093649-6ea169446634 // indirect
+ golang.org/x/mobile v0.0.0-20190826170111-cafc553e1ac5 // indirect
+ golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 // indirect
+ golang.org/x/sys v0.0.0-20190829204830-5fe476d8906b // indirect
+ golang.org/x/tools v0.0.0-20190829210313-340205e581e5 // indirect
+ google.golang.org/api v0.9.0 // indirect
+ google.golang.org/appengine v1.6.2 // indirect
+ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 // indirect
+ google.golang.org/grpc v1.23.0 // indirect
gopkg.in/yaml.v2 v2.2.2
+ honnef.co/go/tools v0.0.1-2019.2.2 // indirect
)
diff --git a/go.sum b/go.sum
index cf6e3205..61db8696 100644
--- a/go.sum
+++ b/go.sum
@@ -1,21 +1,39 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY=
github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE=
+github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a h1:W8b4lQ4tFF21aspRGoBuCNV6V2fFJBF+pm1J6OY8Lys=
github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -24,7 +42,9 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
@@ -35,77 +55,126 @@ github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.4 h1:5xLhQjsk4zqPf9EHCrja2qFZMx+yBqkO3XgJ14bNnU0=
github.com/grpc-ecosystem/grpc-gateway v1.9.4/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.10.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.4/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.3.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.21.0 h1:wYSSj06510qPIzGSua9ZqsncMmWE3Zr55KBERygyrxE=
+github.com/urfave/cli v1.21.0/go.mod h1:lxDj6qX9Q6lWQxIrbrT0nwecwUtRnhVZAJjJZrVUZZQ=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
@@ -115,60 +184,135 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20190829093649-6ea169446634/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mobile v0.0.0-20190826170111-cafc553e1ac5/go.mod h1:mJOp/i0LXPxJZ9weeIadcPqKVfS05Ai7m6/t9z1Hs/Y=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190829204830-5fe476d8906b h1:GA/t9fariXOM5cIRJcMPxJHYYZmYHgXdVH0+JEzddZs=
+golang.org/x/sys v0.0.0-20190829204830-5fe476d8906b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190829210313-340205e581e5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190708153700-3bdd9d9f5532 h1:5pOB7se0B2+IssELuQUs6uoBgYJenkU2AQlvopc2sRw=
google.golang.org/genproto v0.0.0-20190708153700-3bdd9d9f5532/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw=
google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
diff --git a/internal/pkg/config/brick_manager.go b/internal/pkg/config/brick_manager.go
new file mode 100644
index 00000000..fecbf578
--- /dev/null
+++ b/internal/pkg/config/brick_manager.go
@@ -0,0 +1,33 @@
+package config
+
+import (
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "log"
+)
+
+type BrickManagerConfig struct {
+ BrickHostName datamodel.BrickHostName
+ PoolName datamodel.PoolName
+ DeviceCapacityGiB uint
+ DeviceCount uint
+ DeviceAddressPattern string
+ HostEnabled bool
+}
+
+// TODO: need additional validation here
+func GetBrickManagerConfig(env ReadEnvironment) BrickManagerConfig {
+ config := BrickManagerConfig{
+ datamodel.BrickHostName(getHostname(env)),
+ datamodel.PoolName(getString(env, "DAC_POOL_NAME", "default")),
+ getUint(env, "DAC_BRICK_CAPACITY_GB",
+ getUint(env, "DAC_DEVICE_CAPACITY_GB", 1400)),
+ getUint(env, "DAC_BRICK_COUNT",
+ getUint(env, "DEVICE_COUNT", 12)),
+ getString(env, "DAC_BRICK_ADDRESS_PATTERN",
+ getString(env, "DEVICE_TYPE", "nvme%dn1")),
+ // Disabled means don't accept new Sessions, but allow Actions on existing Sessions
+ getBool(env, "DAC_HOST_ENABLED", true),
+ }
+ log.Println("Got brick manager config:", config)
+ return config
+}
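
Editorial note: a sketch of how the fallback chains above resolve, using a hypothetical map-backed stub of the `ReadEnvironment` interface (defined in config.go just below); the stub type, hostname, and values are illustrative:

```go
package config_test

import (
	"fmt"

	"github.com/RSE-Cambridge/data-acc/internal/pkg/config"
)

// mapEnv is a hypothetical in-memory ReadEnvironment, handy for tests.
type mapEnv map[string]string

func (m mapEnv) LookupEnv(key string) (string, bool) {
	val, ok := m[key]
	return val, ok
}

func (m mapEnv) Hostname() (string, error) { return "dac1", nil }

func ExampleGetBrickManagerConfig() {
	// DAC_BRICK_COUNT is unset, so the legacy DEVICE_COUNT value is used;
	// DAC_POOL_NAME overrides the "default" pool.
	env := mapEnv{"DEVICE_COUNT": "6", "DAC_POOL_NAME": "nvme"}
	c := config.GetBrickManagerConfig(env)
	fmt.Println(c.DeviceCount, c.PoolName)
	// Output: 6 nvme
}
```
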
diff --git a/internal/pkg/config/config.go b/internal/pkg/config/config.go
new file mode 100644
index 00000000..cf02598f
--- /dev/null
+++ b/internal/pkg/config/config.go
@@ -0,0 +1,68 @@
+package config
+
+import (
+ "log"
+ "os"
+ "strconv"
+)
+
+type ReadEnvironment interface {
+ LookupEnv(key string) (string, bool)
+ Hostname() (string, error)
+}
+
+func getHostname(env ReadEnvironment) string {
+ hostname, err := env.Hostname()
+ if err != nil {
+ log.Fatal(err)
+ }
+ return hostname
+}
+
+func getUint(env ReadEnvironment, key string, defaultVal uint) uint {
+ val, ok := env.LookupEnv(key)
+ if !ok {
+ return defaultVal
+ }
+ intVal, err := strconv.ParseUint(val, 10, 32)
+ if err != nil {
+ log.Printf("error parsing %s", key)
+ return defaultVal
+ }
+ return uint(intVal)
+}
+
+func getString(env ReadEnvironment, key string, defaultVal string) string {
+ val, ok := env.LookupEnv(key)
+ if !ok {
+ return defaultVal
+ }
+ return val
+}
+
+func getBool(env ReadEnvironment, key string, defaultVal bool) bool {
+ val, ok := env.LookupEnv(key)
+ if !ok {
+ return defaultVal
+ }
+ boolVal, err := strconv.ParseBool(val)
+ if err != nil {
+ log.Printf("error parsing %s", key)
+ return defaultVal
+ }
+ return boolVal
+}
+
+type systemEnv struct{}
+
+func (systemEnv) LookupEnv(key string) (string, bool) {
+ return os.LookupEnv(key)
+ //return "", false
+}
+
+func (systemEnv) Hostname() (string, error) {
+ return os.Hostname()
+ //return "hostname", nil
+}
+
+var DefaultEnv ReadEnvironment = systemEnv{}
diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go
new file mode 100644
index 00000000..1b824e8b
--- /dev/null
+++ b/internal/pkg/config/config_test.go
@@ -0,0 +1,20 @@
+package config
+
+import (
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/stretchr/testify/assert"
+ "os"
+ "testing"
+)
+
+func TestGetBrickManagerConfig(t *testing.T) {
+ config := GetBrickManagerConfig(DefaultEnv)
+
+ hostname, _ := os.Hostname()
+ assert.Equal(t, datamodel.BrickHostName(hostname), config.BrickHostName)
+ assert.Equal(t, uint(12), config.DeviceCount)
+ assert.Equal(t, datamodel.PoolName("default"), config.PoolName)
+ assert.Equal(t, true, config.HostEnabled)
+ assert.Equal(t, "nvme%dn1", config.DeviceAddressPattern)
+ assert.Equal(t, uint(1400), config.DeviceCapacityGiB)
+}
diff --git a/internal/pkg/config/filesystem.go b/internal/pkg/config/filesystem.go
new file mode 100644
index 00000000..39874e42
--- /dev/null
+++ b/internal/pkg/config/filesystem.go
@@ -0,0 +1,29 @@
+package config
+
+type FilesystemConfig struct {
+ MGSDevice string
+ MaxMDTs uint
+ HostGroup string
+ AnsibleDir string
+ SkipAnsible bool
+ LnetSuffix string
+ MDTSizeMB uint
+}
+
+func GetFilesystemConfig() FilesystemConfig {
+ env := DefaultEnv
+ conf := FilesystemConfig{
+ MGSDevice: getString(env, "DAC_MGS_DEV", "sdb"),
+ MaxMDTs: getUint(env, "DAC_MAX_MDT_COUNT", 24),
+ HostGroup: getString(env, "DAC_HOST_GROUP", "dac-prod"),
+ AnsibleDir: getString(env, "DAC_ANSIBLE_DIR", "/var/lib/data-acc/fs-ansible/"),
+ SkipAnsible: getBool(env, "DAC_SKIP_ANSIBLE", false),
+ LnetSuffix: getString(env, "DAC_LNET_SUFFIX", ""),
+ }
+ mdtSizeMB := getUint(env, "DAC_MDT_SIZE_GB", 0) * 1024
+ if mdtSizeMB == 0 {
+ mdtSizeMB = getUint(env, "DAC_MDT_SIZE_MB", uint(20*1024))
+ }
+ conf.MDTSizeMB = mdtSizeMB
+ return conf
+}
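
Editorial note: the MDT sizing above keeps backwards compatibility: `DAC_MDT_SIZE_GB` wins when set and non-zero, otherwise `DAC_MDT_SIZE_MB` applies, with a 20 GiB default. A small sketch of the precedence (values are illustrative; `GetFilesystemConfig` reads the real process environment, so this manipulates it directly):

```go
package main

import (
	"fmt"
	"os"

	"github.com/RSE-Cambridge/data-acc/internal/pkg/config"
)

func main() {
	os.Setenv("DAC_MDT_SIZE_GB", "1")
	fmt.Println(config.GetFilesystemConfig().MDTSizeMB) // 1024

	os.Unsetenv("DAC_MDT_SIZE_GB")
	os.Setenv("DAC_MDT_SIZE_MB", "512")
	fmt.Println(config.GetFilesystemConfig().MDTSizeMB) // 512

	os.Unsetenv("DAC_MDT_SIZE_MB")
	fmt.Println(config.GetFilesystemConfig().MDTSizeMB) // 20480
}
```
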
diff --git a/internal/pkg/config/keystore.go b/internal/pkg/config/keystore.go
new file mode 100644
index 00000000..fbf1a93e
--- /dev/null
+++ b/internal/pkg/config/keystore.go
@@ -0,0 +1,30 @@
+package config
+
+import (
+ "log"
+ "strings"
+)
+
+type KeystoreConfig struct {
+ Endpoints []string
+ CertFile string
+ KeyFile string
+ CAFile string
+}
+
+func GetKeystoreConfig(env ReadEnvironment) KeystoreConfig {
+ config := KeystoreConfig{
+ CertFile: getString(env, "ETCDCTL_CERT_FILE", ""),
+ KeyFile: getString(env, "ETCDCTL_KEY_FILE", ""),
+ CAFile: getString(env, "ETCDCTL_CA_FILE", ""),
+ }
+ endpointsStr := getString(env, "ETCDCTL_ENDPOINTS", "")
+ if endpointsStr == "" {
+ endpointsStr = getString(env, "ETCD_ENDPOINTS", "")
+ }
+ if endpointsStr == "" {
+		log.Fatalf("Must set ETCDCTL_ENDPOINTS environment variable, e.g. export ETCDCTL_ENDPOINTS=127.0.0.1:2379")
+ }
+ config.Endpoints = strings.Split(endpointsStr, ",")
+ return config
+}
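
Editorial note: a short usage sketch for the keystore config (endpoint values are illustrative); note the comma-split and the hard failure when neither endpoints variable is set:

```go
package main

import (
	"fmt"
	"os"

	"github.com/RSE-Cambridge/data-acc/internal/pkg/config"
)

func main() {
	// ETCDCTL_ENDPOINTS takes precedence; ETCD_ENDPOINTS is the fallback.
	os.Setenv("ETCDCTL_ENDPOINTS", "10.0.0.1:2379,10.0.0.2:2379")
	c := config.GetKeystoreConfig(config.DefaultEnv)
	fmt.Println(c.Endpoints) // [10.0.0.1:2379 10.0.0.2:2379]
}
```
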
diff --git a/internal/pkg/config/log.go b/internal/pkg/config/log.go
new file mode 100644
index 00000000..b8f31381
--- /dev/null
+++ b/internal/pkg/config/log.go
@@ -0,0 +1,5 @@
+package config
+
+func GetDacctlLog() string {
+ return getString(DefaultEnv, "DACCTL_LOG", "/var/log/dacctl.log")
+}
diff --git a/internal/pkg/dacctl/actions/actions.go b/internal/pkg/dacctl/actions/actions.go
deleted file mode 100644
index e336ac1a..00000000
--- a/internal/pkg/dacctl/actions/actions.go
+++ /dev/null
@@ -1,311 +0,0 @@
-package actions
-
-import (
- "errors"
- "fmt"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/dacctl"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/fileio"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/lifecycle"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "log"
- "strings"
-)
-
-type CliContext interface {
- String(name string) string
- Int(name string) int
-}
-
-type DacctlActions interface {
- CreatePersistentBuffer(c CliContext) error
- DeleteBuffer(c CliContext) error
- CreatePerJobBuffer(c CliContext) error
- ShowInstances() error
- ShowSessions() error
- ListPools() error
- ShowConfigurations() error
- ValidateJob(c CliContext) error
- RealSize(c CliContext) error
- DataIn(c CliContext) error
- Paths(c CliContext) error
- PreRun(c CliContext) error
- PostRun(c CliContext) error
- DataOut(c CliContext) error
-}
-
-func NewDacctlActions(
- poolRegistry registry.PoolRegistry, volumeRegistry registry.VolumeRegistry, disk fileio.Disk) DacctlActions {
-
- return &dacctlActions{poolRegistry, volumeRegistry, disk}
-}
-
-type dacctlActions struct {
- poolRegistry registry.PoolRegistry
- volumeRegistry registry.VolumeRegistry
- disk fileio.Disk
-}
-
-func (fwa *dacctlActions) CreatePersistentBuffer(c CliContext) error {
- checkRequiredStrings(c, "token", "caller", "capacity", "user", "access", "type")
- request := dacctl.BufferRequest{Token: c.String("token"), Caller: c.String("caller"),
- Capacity: c.String("capacity"), User: c.Int("user"),
- Group: c.Int("groupid"), Access: dacctl.AccessModeFromString(c.String("access")),
- Type: dacctl.BufferTypeFromString(c.String("type")), Persistent: true}
- if request.Group == 0 {
- request.Group = request.User
- }
- err := dacctl.CreateVolumesAndJobs(fwa.volumeRegistry, fwa.poolRegistry, request)
- if err == nil {
- // Slurm is looking for the string "created" to know this worked
- fmt.Printf("created %s\n", request.Token)
- }
- return err
-}
-
-func checkRequiredStrings(c CliContext, flags ...string) {
- errs := []string{}
- for _, flag := range flags {
- if str := c.String(flag); str == "" {
- errs = append(errs, flag)
- }
- }
- if len(errs) > 0 {
- log.Fatalf("Please provide these required parameters: %s", strings.Join(errs, ", "))
- }
-}
-
-func (fwa *dacctlActions) DeleteBuffer(c CliContext) error {
- checkRequiredStrings(c, "token")
- token := c.String("token")
- return dacctl.DeleteBufferComponents(fwa.volumeRegistry, fwa.poolRegistry, token)
-}
-
-func (fwa *dacctlActions) CreatePerJobBuffer(c CliContext) error {
- checkRequiredStrings(c, "token", "job", "caller", "capacity")
- return dacctl.CreatePerJobBuffer(fwa.volumeRegistry, fwa.poolRegistry, fwa.disk,
- c.String("token"), c.Int("user"), c.Int("group"), c.String("capacity"),
- c.String("caller"), c.String("job"), c.String("nodehostnamefile"))
-}
-
-func (fwa *dacctlActions) ShowInstances() error {
- instances, err := dacctl.GetInstances(fwa.volumeRegistry)
- if err != nil {
- return err
- }
- fmt.Println(instances)
- return nil
-}
-
-func (fwa *dacctlActions) ShowSessions() error {
- sessions, err := dacctl.GetSessions(fwa.volumeRegistry)
- if err != nil {
- return err
- }
- fmt.Println(sessions)
- return nil
-}
-
-func (fwa *dacctlActions) ListPools() error {
- pools, err := dacctl.GetPools(fwa.poolRegistry)
- if err != nil {
- return err
- }
- fmt.Println(pools)
- return nil
-}
-
-func (fwa *dacctlActions) ShowConfigurations() error {
- fmt.Print(dacctl.GetConfigurations())
- return nil
-}
-
-func (fwa *dacctlActions) ValidateJob(c CliContext) error {
- checkRequiredStrings(c, "job")
- if summary, err := dacctl.ParseJobFile(fwa.disk, c.String("job")); err != nil {
- return err
- } else {
- // TODO check valid pools, etc, etc.
- log.Println("Summary of job file:", summary)
- }
- return nil
-}
-
-func (fwa *dacctlActions) RealSize(c CliContext) error {
- checkRequiredStrings(c, "token")
- job, err := fwa.volumeRegistry.Job(c.String("token"))
- if err != nil {
- return err
- }
-
- if job.JobVolume == "" {
- return fmt.Errorf("no volume to report the size of: %s", job.Name)
- }
-
- volume, err := fwa.volumeRegistry.Volume(job.JobVolume)
- if err != nil {
- return err
- }
- // TODO get GiB vs GB correct here!
- fmt.Printf(`{"token":"%s", "capacity":%d, "units":"bytes"}`, volume.Name, volume.SizeGB*1073741824)
- return nil
-}
-
-func (fwa *dacctlActions) DataIn(c CliContext) error {
- checkRequiredStrings(c, "token")
- fmt.Printf("--token %s --job %s\n", c.String("token"), c.String("job"))
-
- job, err := fwa.volumeRegistry.Job(c.String("token"))
- if err != nil {
- return err
- }
-
- if job.JobVolume == "" {
- log.Print("No data in required")
- return nil
- }
-
- volume, err := fwa.volumeRegistry.Volume(job.JobVolume)
- if err != nil {
- return err
- }
-
- vlm := lifecycle.NewVolumeLifecycleManager(fwa.volumeRegistry, fwa.poolRegistry, volume)
- return vlm.DataIn()
-}
-
-func (fwa *dacctlActions) Paths(c CliContext) error {
- checkRequiredStrings(c, "token", "pathfile")
- fmt.Printf("--token %s --job %s --pathfile %s\n",
- c.String("token"), c.String("job"), c.String("pathfile"))
-
- job, err := fwa.volumeRegistry.Job(c.String("token"))
- if err != nil {
- return err
- }
-
- paths := []string{}
- for key, value := range job.Paths {
- paths = append(paths, fmt.Sprintf("%s=%s", key, value))
- }
- return fwa.disk.Write(c.String("pathfile"), paths)
-}
-
-var testVLM lifecycle.VolumeLifecycleManager
-
-func (fwa *dacctlActions) getVolumeLifecycleManger(volume registry.Volume) lifecycle.VolumeLifecycleManager {
- if testVLM != nil {
- return testVLM
- }
- return lifecycle.NewVolumeLifecycleManager(fwa.volumeRegistry, fwa.poolRegistry, volume)
-}
-
-func (fwa *dacctlActions) PreRun(c CliContext) error {
- checkRequiredStrings(c, "token", "nodehostnamefile")
- fmt.Printf("--token %s --job %s --nodehostnamefile %s\n",
- c.String("token"), c.String("job"), c.String("nodehostnamefile"))
-
- job, err := fwa.volumeRegistry.Job(c.String("token"))
- if err != nil {
- return err
- }
-
- hosts, err := fwa.disk.Lines(c.String("nodehostnamefile"))
- if err != nil {
- return err
- }
- if len(hosts) < 1 {
- return errors.New("unable to mount to zero compute hosts")
- }
-
- err = fwa.volumeRegistry.JobAttachHosts(job.Name, hosts)
- if err != nil {
- return err
- }
-
- if job.JobVolume == "" {
- log.Print("No job volume to mount")
- } else {
- volume, err := fwa.volumeRegistry.Volume(job.JobVolume)
- if err != nil {
- return err
- }
- vlm := fwa.getVolumeLifecycleManger(volume)
- if err := vlm.Mount(hosts, job.Name); err != nil {
- return err
- }
- }
-
- for _, volumeName := range job.MultiJobVolumes {
- volume, err := fwa.volumeRegistry.Volume(volumeName)
- if err != nil {
- return err
- }
- vlm := fwa.getVolumeLifecycleManger(volume)
- if err := vlm.Mount(hosts, job.Name); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (fwa *dacctlActions) PostRun(c CliContext) error {
- checkRequiredStrings(c, "token")
- fmt.Printf("--token %s --job %s\n",
- c.String("token"), c.String("job"))
-
- job, err := fwa.volumeRegistry.Job(c.String("token"))
- if err != nil {
- return err
- }
-
- if job.JobVolume == "" {
- log.Print("No job volume to unmount")
- } else {
- volume, err := fwa.volumeRegistry.Volume(job.JobVolume)
- if err != nil {
- return err
- }
- vlm := lifecycle.NewVolumeLifecycleManager(fwa.volumeRegistry, fwa.poolRegistry, volume)
- if err := vlm.Unmount(job.AttachHosts, job.Name); err != nil {
- return err
- }
- }
-
- for _, volumeName := range job.MultiJobVolumes {
- volume, err := fwa.volumeRegistry.Volume(volumeName)
- if err != nil {
- return err
- }
- vlm := lifecycle.NewVolumeLifecycleManager(fwa.volumeRegistry, fwa.poolRegistry, volume)
- if err := vlm.Unmount(job.AttachHosts, job.Name); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (fwa *dacctlActions) DataOut(c CliContext) error {
- checkRequiredStrings(c, "token")
- fmt.Printf("--token %s --job %s\n",
- c.String("token"), c.String("job"))
-
- job, err := fwa.volumeRegistry.Job(c.String("token"))
- if err != nil {
- return err
- }
-
- if job.JobVolume == "" {
- log.Print("No data out required")
- return nil
- }
-
- volume, err := fwa.volumeRegistry.Volume(job.JobVolume)
- if err != nil {
- return err
- }
-
- vlm := lifecycle.NewVolumeLifecycleManager(fwa.volumeRegistry, fwa.poolRegistry, volume)
- return vlm.DataOut()
-}
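
Editorial note: the deleted `RealSize` above multiplies `SizeGB` by 1073741824, the GiB multiplier, despite the GB field name; that mismatch is exactly what its TODO flags. For reference, the two conventions differ by roughly 7% at this scale:

```go
package main

import "fmt"

func main() {
	const sizeGB = 400

	// Decimal gigabytes: 1 GB = 10^9 bytes.
	fmt.Println(sizeGB * 1000000000) // 400000000000

	// Binary gibibytes: 1 GiB = 2^30 = 1073741824 bytes,
	// the multiplier the deleted RealSize actually used.
	fmt.Println(sizeGB * 1073741824) // 429496729600
}
```
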
diff --git a/internal/pkg/dacctl/actions/actions_test.go b/internal/pkg/dacctl/actions/actions_test.go
deleted file mode 100644
index 4660388c..00000000
--- a/internal/pkg/dacctl/actions/actions_test.go
+++ /dev/null
@@ -1,215 +0,0 @@
-package actions
-
-import (
- "fmt"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/mocks"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "github.com/golang/mock/gomock"
- "github.com/stretchr/testify/assert"
- "testing"
- "time"
-)
-
-type mockCliContext struct {
- capacity int
-}
-
-func (c *mockCliContext) String(name string) string {
- switch name {
- case "capacity":
- return fmt.Sprintf("pool1:%dGB", c.capacity)
- case "token":
- return "token"
- case "caller":
- return "caller"
- case "user":
- return "user"
- case "access":
- return "access"
- case "type":
- return "type"
- case "job":
- return "jobfile"
- case "nodehostnamefile":
- return "nodehostnamefile1"
- case "pathfile":
- return "pathfile1"
- default:
- return ""
- }
-}
-
-func (c *mockCliContext) Int(name string) int {
- switch name {
- case "user":
- return 1001
- case "group":
- return 1001
- default:
- return 42 + len(name)
- }
-}
-
-func TestCreatePersistentBufferReturnsError(t *testing.T) {
- mockCtrl := gomock.NewController(t)
- defer mockCtrl.Finish()
- mockObj := mocks.NewMockVolumeRegistry(mockCtrl)
- mockObj.EXPECT().AddVolume(gomock.Any()) // TODO
- mockObj.EXPECT().AddJob(gomock.Any())
- mockPool := mocks.NewMockPoolRegistry(mockCtrl)
- mockPool.EXPECT().Pools().DoAndReturn(func() ([]registry.Pool, error) {
- return []registry.Pool{{Name: "pool1", GranularityGB: 1}}, nil
- })
-
- mockPool.EXPECT().AllocateBricksForVolume(gomock.Any())
- mockCtxt := &mockCliContext{}
-
- actions := NewDacctlActions(mockPool, mockObj, nil)
-
- err := actions.CreatePersistentBuffer(mockCtxt)
- assert.Nil(t, err)
-}
-
-func TestDacctlActions_PreRun(t *testing.T) {
- mockCtrl := gomock.NewController(t)
- defer mockCtrl.Finish()
- mockVolReg := mocks.NewMockVolumeRegistry(mockCtrl)
- mockDisk := mocks.NewMockDisk(mockCtrl)
- mockCtxt := &mockCliContext{}
- actions := NewDacctlActions(nil, mockVolReg, mockDisk)
- testVLM = &mockVLM{}
- defer func() { testVLM = nil }()
-
- mockDisk.EXPECT().Lines("nodehostnamefile1").DoAndReturn(func(string) ([]string, error) {
- return []string{"host1", "host2"}, nil
- })
- mockVolReg.EXPECT().Job("token").DoAndReturn(
- func(name string) (registry.Job, error) {
- return registry.Job{
- Name: "token",
- JobVolume: registry.VolumeName("token"),
- MultiJobVolumes: []registry.VolumeName{registry.VolumeName("othervolume")},
- }, nil
- })
- mockVolReg.EXPECT().JobAttachHosts("token", []string{"host1", "host2"})
- mockVolReg.EXPECT().Volume(registry.VolumeName("token"))
- mockVolReg.EXPECT().Volume(registry.VolumeName("othervolume")).DoAndReturn(
- func(name registry.VolumeName) (registry.Volume, error) {
- return registry.Volume{Name: name}, nil
- })
-
- err := actions.PreRun(mockCtxt)
- assert.Nil(t, err)
-}
-
-func TestDacctlActions_Paths(t *testing.T) {
- mockCtrl := gomock.NewController(t)
- defer mockCtrl.Finish()
- mockVolReg := mocks.NewMockVolumeRegistry(mockCtrl)
- mockDisk := mocks.NewMockDisk(mockCtrl)
- mockCtxt := &mockCliContext{}
- actions := NewDacctlActions(nil, mockVolReg, mockDisk)
- testVLM = &mockVLM{}
- defer func() { testVLM = nil }()
-
- mockVolReg.EXPECT().Job("token").DoAndReturn(
- func(name string) (registry.Job, error) {
- return registry.Job{JobVolume: registry.VolumeName("token"), Paths: map[string]string{"a": "A"}}, nil
- })
- mockDisk.EXPECT().Write("pathfile1", []string{"a=A"})
-
- err := actions.Paths(mockCtxt)
- assert.Nil(t, err)
-}
-
-func TestDacctlActions_CreatePerJobBuffer(t *testing.T) {
- mockCtrl := gomock.NewController(t)
- defer mockCtrl.Finish()
- mockPoolReg := mocks.NewMockPoolRegistry(mockCtrl)
- mockVolReg := mocks.NewMockVolumeRegistry(mockCtrl)
- mockDisk := mocks.NewMockDisk(mockCtrl)
- mockCtxt := &mockCliContext{capacity: 300}
- actions := NewDacctlActions(mockPoolReg, mockVolReg, mockDisk)
-
- mockDisk.EXPECT().Lines("jobfile").DoAndReturn(func(string) ([]string, error) {
- return []string{
- "#DW persistentdw name=mybuffer",
- "#DW jobdw capacity=2GB access_mode=striped,private type=scratch",
- }, nil
- })
-
- mockPoolReg.EXPECT().Pools().DoAndReturn(func() ([]registry.Pool, error) {
- return []registry.Pool{{Name: "pool1", GranularityGB: 200}}, nil
- })
- mockVolReg.EXPECT().Volume(registry.VolumeName("mybuffer")).DoAndReturn(
- func(name registry.VolumeName) (registry.Volume, error) {
- return registry.Volume{Name: name, MultiJob: true}, nil
- })
- expectedVolume := registry.Volume{
- Name: "token",
- MultiJob: false,
- State: registry.Registered,
- Pool: "pool1",
- SizeBricks: 2,
- SizeGB: 400,
- JobName: "token",
- Owner: 1001,
- Group: 1001,
- CreatedBy: "caller",
- CreatedAt: uint(time.Now().Unix()), // TODO this is racey!
- AttachGlobalNamespace: true,
- AttachPrivateNamespace: true,
- AttachAsSwapBytes: 0,
- }
- mockVolReg.EXPECT().AddVolume(expectedVolume)
- mockVolReg.EXPECT().AddJob(registry.Job{
- Name: "token",
- Owner: 1001,
- CreatedAt: uint(time.Now().Unix()),
- Paths: map[string]string{
- "DW_PERSISTENT_STRIPED_mybuffer": "/dac/token_persistent_mybuffer",
- "DW_JOB_PRIVATE": "/dac/token_job_private",
- "DW_JOB_STRIPED": "/dac/token_job/global",
- },
- JobVolume: registry.VolumeName("token"),
- MultiJobVolumes: []registry.VolumeName{"mybuffer"},
- })
- mockVolReg.EXPECT().Volume(registry.VolumeName("token")).DoAndReturn(
- func(name registry.VolumeName) (registry.Volume, error) {
- return registry.Volume{
- Name: name,
- SizeBricks: 0, // TODO: skips ProvisionBricks logic
- }, nil
- })
- // TODO: sort out the volume passed here!
- mockPoolReg.EXPECT().AllocateBricksForVolume(gomock.Any())
-
- err := actions.CreatePerJobBuffer(mockCtxt)
- assert.Nil(t, err)
-}
-
-type mockVLM struct{}
-
-func (*mockVLM) ProvisionBricks() error {
- panic("implement me")
-}
-
-func (*mockVLM) DataIn() error {
- panic("implement me")
-}
-
-func (*mockVLM) Mount(hosts []string, jobName string) error {
- return nil
-}
-
-func (*mockVLM) Unmount(hosts []string, jobName string) error {
- panic("implement me")
-}
-
-func (*mockVLM) DataOut() error {
- panic("implement me")
-}
-
-func (*mockVLM) Delete() error {
- panic("implement me")
-}
diff --git a/internal/pkg/dacctl/actions_impl/actions.go b/internal/pkg/dacctl/actions_impl/actions.go
new file mode 100644
index 00000000..89f15668
--- /dev/null
+++ b/internal/pkg/dacctl/actions_impl/actions.go
@@ -0,0 +1,127 @@
+package actions_impl
+
+import (
+ "errors"
+ "fmt"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/dacctl"
+ parsers2 "github.com/RSE-Cambridge/data-acc/internal/pkg/dacctl/actions_impl/parsers"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/dacctl/workflow_impl"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/facade"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/fileio"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/store"
+ "log"
+ "strings"
+)
+
+func NewDacctlActions(keystore store.Keystore, disk fileio.Disk) dacctl.DacctlActions {
+ return &dacctlActions{
+ session: workflow_impl.NewSessionFacade(keystore),
+ disk: disk,
+ }
+}
+
+type dacctlActions struct {
+ session facade.Session
+ disk fileio.Disk
+}
+
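+// checkRequiredStrings returns a single error naming every required CLI
+// flag that was passed empty, so the caller sees all missing parameters at once.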
+func checkRequiredStrings(c dacctl.CliContext, flags ...string) error {
+ var errs []string
+ for _, flag := range flags {
+ if str := c.String(flag); str == "" {
+ errs = append(errs, flag)
+ }
+ }
+ if len(errs) > 0 {
+ errStr := fmt.Sprintf("Please provide these required parameters: %s", strings.Join(errs, ", "))
+ log.Println(errStr)
+ return errors.New(errStr)
+ }
+ return nil
+}
+
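+// getSessionName reads the "token" flag and validates it as a session name.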
+func (d *dacctlActions) getSessionName(c dacctl.CliContext) (datamodel.SessionName, error) {
+ err := checkRequiredStrings(c, "token")
+ if err != nil {
+ return "", err
+ }
+
+ token := c.String("token")
+ if !parsers2.IsValidName(token) {
+ return "", fmt.Errorf("badly formatted session name: %s", token)
+ }
+
+ return datamodel.SessionName(token), nil
+}
+
+func (d *dacctlActions) DeleteBuffer(c dacctl.CliContext) error {
+ sessionName, err := d.getSessionName(c)
+ if err != nil {
+ return err
+ }
+ hurry := c.Bool("hurry")
+ return d.session.DeleteSession(sessionName, hurry)
+}
+
+func (d *dacctlActions) DataIn(c dacctl.CliContext) error {
+ sessionName, err := d.getSessionName(c)
+ if err != nil {
+ return err
+ }
+ return d.session.CopyDataIn(sessionName)
+}
+
+func (d *dacctlActions) PreRun(c dacctl.CliContext) error {
+ sessionName, err := d.getSessionName(c)
+ if err != nil {
+ return err
+ }
+ err = checkRequiredStrings(c, "nodehostnamefile")
+ if err != nil {
+ return err
+ }
+
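+ // a mount needs at least one compute host; login (job execution) hosts are optional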
+ computeHosts, err := parsers2.GetHostnamesFromFile(d.disk, c.String("nodehostnamefile"))
+ if err != nil {
+ return err
+ }
+ if len(computeHosts) < 1 {
+ return errors.New("unable to mount to zero compute hosts")
+ }
+
+ loginNodeFilename := c.String("jobexecutionnodefile")
+ var loginNodeHosts []string
+ if loginNodeFilename != "" {
+ loginNodeHosts, err = parsers2.GetHostnamesFromFile(d.disk, loginNodeFilename)
+ if err != nil {
+ return err
+ }
+ }
+
+ return d.session.Mount(sessionName, computeHosts, loginNodeHosts)
+}
+
+func (d *dacctlActions) PostRun(c dacctl.CliContext) error {
+ sessionName, err := d.getSessionName(c)
+ if err != nil {
+ return err
+ }
+ return d.session.Unmount(sessionName)
+}
+
+func (d *dacctlActions) DataOut(c dacctl.CliContext) error {
+ sessionName, err := d.getSessionName(c)
+ if err != nil {
+ return err
+ }
+ return d.session.CopyDataOut(sessionName)
+}
+
+func (d *dacctlActions) GenerateAnsible(c dacctl.CliContext) (string, error) {
+ sessionName, err := d.getSessionName(c)
+ if err != nil {
+ return "", err
+ }
+ return d.session.GenerateAnsible(sessionName)
+}
diff --git a/internal/pkg/dacctl/actions_impl/actions_test.go b/internal/pkg/dacctl/actions_impl/actions_test.go
new file mode 100644
index 00000000..92eece11
--- /dev/null
+++ b/internal/pkg/dacctl/actions_impl/actions_test.go
@@ -0,0 +1,244 @@
+package actions_impl
+
+import (
+ "errors"
+ "fmt"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/mock_facade"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/mock_fileio"
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+type mockCliContext struct {
+ strings map[string]string
+ integers map[string]int
+ booleans map[string]bool
+}
+
+func (c *mockCliContext) String(name string) string {
+ return c.strings[name]
+}
+
+func (c *mockCliContext) Int(name string) int {
+ return c.integers[name]
+}
+
+func (c *mockCliContext) Bool(name string) bool {
+ return c.booleans[name]
+}
+
+func getMockCliContext(capacity int) *mockCliContext {
+ ctxt := mockCliContext{}
+ ctxt.strings = map[string]string{
+ "capacity": fmt.Sprintf("pool1:%dGiB", capacity),
+ "token": "token",
+ "caller": "caller",
+ "access": "asdf",
+ "type": "type",
+ "job": "jobfile",
+ "nodehostnamefile": "nodehostnamefile1",
+ "pathfile": "pathfile1",
+ }
+ ctxt.integers = map[string]int{
+ "user": 1001,
+ "group": 1002,
+ }
+ return &ctxt
+}
+
+func TestDacctlActions_DeleteBuffer(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ session := mock_facade.NewMockSession(mockCtrl)
+
+ fakeError := errors.New("fake")
+ session.EXPECT().DeleteSession(datamodel.SessionName("bar"), true).Return(fakeError)
+
+ actions := dacctlActions{session: session}
+ err := actions.DeleteBuffer(&mockCliContext{
+ strings: map[string]string{"token": "bar"},
+ booleans: map[string]bool{"hurry": true},
+ })
+
+ assert.Equal(t, fakeError, err)
+
+ err = actions.DeleteBuffer(&mockCliContext{})
+ assert.Equal(t, "Please provide these required parameters: token", err.Error())
+}
+
+func TestDacctlActions_DataIn(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ session := mock_facade.NewMockSession(mockCtrl)
+
+ fakeError := errors.New("fake")
+ session.EXPECT().CopyDataIn(datamodel.SessionName("bar")).Return(fakeError)
+
+ actions := dacctlActions{session: session}
+ err := actions.DataIn(&mockCliContext{
+ strings: map[string]string{"token": "bar"},
+ })
+
+ assert.Equal(t, fakeError, err)
+
+ err = actions.DataIn(&mockCliContext{})
+ assert.Equal(t, "Please provide these required parameters: token", err.Error())
+
+ err = actions.DataIn(&mockCliContext{
+ strings: map[string]string{"token": "bad token"},
+ })
+ assert.Equal(t, "badly formatted session name: bad token", err.Error())
+}
+
+func TestDacctlActions_DataOut(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ session := mock_facade.NewMockSession(mockCtrl)
+
+ fakeError := errors.New("fake")
+ session.EXPECT().CopyDataOut(datamodel.SessionName("bar")).Return(fakeError)
+
+ actions := dacctlActions{session: session}
+ err := actions.DataOut(&mockCliContext{
+ strings: map[string]string{"token": "bar"},
+ })
+
+ assert.Equal(t, fakeError, err)
+
+ err = actions.DataOut(&mockCliContext{})
+ assert.Equal(t, "Please provide these required parameters: token", err.Error())
+}
+
+func TestDacctlActions_PreRun(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ session := mock_facade.NewMockSession(mockCtrl)
+ disk := mock_fileio.NewMockDisk(mockCtrl)
+
+ computeHosts := []string{"host1", "host2"}
+ loginHosts := []string{"login"}
+ disk.EXPECT().Lines("computehostfile").Return(computeHosts, nil)
+ disk.EXPECT().Lines("loginhostfile").Return(loginHosts, nil)
+ fakeError := errors.New("fake")
+ session.EXPECT().Mount(datamodel.SessionName("bar"), computeHosts, loginHosts).Return(fakeError)
+
+ actions := dacctlActions{session: session, disk: disk}
+ err := actions.PreRun(&mockCliContext{
+ strings: map[string]string{
+ "token": "bar",
+ "nodehostnamefile": "computehostfile",
+ "jobexecutionnodefile": "loginhostfile",
+ },
+ })
+
+ assert.Equal(t, fakeError, err)
+
+ err = actions.PreRun(&mockCliContext{})
+ assert.Equal(t, "Please provide these required parameters: token", err.Error())
+
+ err = actions.PreRun(&mockCliContext{strings: map[string]string{"token": "bar"}})
+ assert.Equal(t, "Please provide these required parameters: nodehostnamefile", err.Error())
+}
+
+func TestDacctlActions_PreRun_NoLoginHosts(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ session := mock_facade.NewMockSession(mockCtrl)
+ disk := mock_fileio.NewMockDisk(mockCtrl)
+
+ computeHosts := []string{"host1", "host2"}
+ disk.EXPECT().Lines("computehostfile").Return(computeHosts, nil)
+ fakeError := errors.New("fake")
+ session.EXPECT().Mount(datamodel.SessionName("bar"), computeHosts, nil).Return(fakeError)
+
+ actions := dacctlActions{session: session, disk: disk}
+ err := actions.PreRun(&mockCliContext{
+ strings: map[string]string{
+ "token": "bar",
+ "nodehostnamefile": "computehostfile",
+ },
+ })
+
+ assert.Equal(t, fakeError, err)
+}
+
+func TestDacctlActions_PreRun_BadHosts(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ disk := mock_fileio.NewMockDisk(mockCtrl)
+
+ computeHosts := []string{"host1", "host/2"}
+ disk.EXPECT().Lines("computehostfile").Return(computeHosts, nil)
+
+ actions := dacctlActions{disk: disk}
+ err := actions.PreRun(&mockCliContext{
+ strings: map[string]string{
+ "token": "bar",
+ "nodehostnamefile": "computehostfile",
+ },
+ })
+
+ assert.Equal(t, "invalid hostname in: [host/2]", err.Error())
+}
+
+func TestDacctlActions_PreRun_BadLoginHosts(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ session := mock_facade.NewMockSession(mockCtrl)
+ disk := mock_fileio.NewMockDisk(mockCtrl)
+
+ computeHosts := []string{"host1", "host2"}
+ loginHosts := []string{"login/asdf"}
+ disk.EXPECT().Lines("computehostfile").Return(computeHosts, nil)
+ disk.EXPECT().Lines("loginhostfile").Return(loginHosts, nil)
+
+ actions := dacctlActions{session: session, disk: disk}
+ err := actions.PreRun(&mockCliContext{
+ strings: map[string]string{
+ "token": "bar",
+ "nodehostnamefile": "computehostfile",
+ "jobexecutionnodefile": "loginhostfile",
+ },
+ })
+
+ assert.Equal(t, "invalid hostname in: [login/asdf]", err.Error())
+}
+
+func TestDacctlActions_PreRun_NoHosts(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ disk := mock_fileio.NewMockDisk(mockCtrl)
+
+ disk.EXPECT().Lines("computehostfile").Return(nil, nil)
+
+ actions := NewDacctlActions(nil, disk)
+ err := actions.PreRun(&mockCliContext{
+ strings: map[string]string{
+ "token": "bar",
+ "nodehostnamefile": "computehostfile",
+ },
+ })
+
+ assert.Equal(t, "unable to mount to zero compute hosts", err.Error())
+}
+
+func TestDacctlActions_PostRun(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ session := mock_facade.NewMockSession(mockCtrl)
+
+ fakeError := errors.New("fake")
+ session.EXPECT().Unmount(datamodel.SessionName("bar")).Return(fakeError)
+
+ actions := dacctlActions{session: session}
+ err := actions.PostRun(&mockCliContext{
+ strings: map[string]string{"token": "bar"},
+ })
+
+ assert.Equal(t, fakeError, err)
+
+ err = actions.PostRun(&mockCliContext{})
+ assert.Equal(t, "Please provide these required parameters: token", err.Error())
+}
diff --git a/internal/pkg/dacctl/actions_impl/configurations.go b/internal/pkg/dacctl/actions_impl/configurations.go
new file mode 100644
index 00000000..c9a054c8
--- /dev/null
+++ b/internal/pkg/dacctl/actions_impl/configurations.go
@@ -0,0 +1,17 @@
+package actions_impl
+
+import (
+ "encoding/json"
+ "log"
+)
+
+type configurations []string
+
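+// configurationToString renders the list as a {"configurations": [...]} JSON
+// document, matching the shape of the other dacctl show commands.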
+func configurationToString(list configurations) string {
+ message := map[string]configurations{"configurations": list}
+ output, err := json.Marshal(message)
+ if err != nil {
+ log.Fatal(err.Error())
+ }
+ return string(output)
+}
diff --git a/internal/pkg/dacctl/actions_impl/instances.go b/internal/pkg/dacctl/actions_impl/instances.go
new file mode 100644
index 00000000..6a28c490
--- /dev/null
+++ b/internal/pkg/dacctl/actions_impl/instances.go
@@ -0,0 +1,32 @@
+package actions_impl
+
+import (
+ "encoding/json"
+ "github.com/prometheus/common/log"
+)
+
+type instanceCapacity struct {
+ Bytes uint `json:"bytes"`
+ Nodes uint `json:"nodes"`
+}
+
+type instanceLinks struct {
+ Session string `json:"session"`
+}
+
+type instance struct {
+ Id string `json:"id"`
+ Capacity instanceCapacity `json:"capacity"`
+ Links instanceLinks `json:"links"`
+}
+
+type instances []instance
+
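+// instancesToString renders the list as an {"instances": [...]} JSON document.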
+func instancesToString(list []instance) string {
+ message := map[string]instances{"instances": list}
+ output, err := json.Marshal(message)
+ if err != nil {
+ log.Fatal(err.Error())
+ }
+ return string(output)
+}
diff --git a/internal/pkg/dacctl/actions_impl/job.go b/internal/pkg/dacctl/actions_impl/job.go
new file mode 100644
index 00000000..d96f1b4e
--- /dev/null
+++ b/internal/pkg/dacctl/actions_impl/job.go
@@ -0,0 +1,111 @@
+package actions_impl
+
+import (
+ "fmt"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/dacctl"
+ parsers2 "github.com/RSE-Cambridge/data-acc/internal/pkg/dacctl/actions_impl/parsers"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "log"
+ "sort"
+)
+
+func (d *dacctlActions) ValidateJob(c dacctl.CliContext) error {
+ err := checkRequiredStrings(c, "job")
+ if err != nil {
+ return err
+ }
+
+ jobFile := c.String("job")
+ summary, err := parsers2.ParseJobFile(d.disk, jobFile)
+ if err != nil {
+ return err
+ } else {
+ // TODO check valid pools, etc, etc.
+ log.Println("Summary of job file:", summary)
+ }
+ return nil
+}
+
+func (d *dacctlActions) CreatePerJobBuffer(c dacctl.CliContext) error {
+ err := checkRequiredStrings(c, "token", "job", "caller", "capacity")
+ if err != nil {
+ return err
+ }
+ // TODO: need to specify user and group too
+
+ jobFile := c.String("job")
+ summary, err := parsers2.ParseJobFile(d.disk, jobFile)
+ if err != nil {
+ return err
+ }
+
+ nodeFile := c.String("nodehostnamefile")
+ if nodeFile != "" {
+ // TODO: could we add this to the volume as a scheduling hint once it's available?
+ log.Printf("Ignoring nodeFile in setup: %s", nodeFile)
+ }
+
+ pool, capacityBytes, err := parsers2.ParseCapacityBytes(c.String("capacity"))
+ if err != nil {
+ return err
+ }
+
+ // extract info from job file
+ swapBytes := 0
+ if summary.Swap != nil {
+ swapBytes = summary.Swap.SizeBytes
+ }
+ access := datamodel.NoAccess
+ bufferType := datamodel.Scratch
+ if summary.PerJobBuffer != nil {
+ access = summary.PerJobBuffer.AccessMode
+ if summary.PerJobBuffer.BufferType != datamodel.Scratch {
+ return fmt.Errorf("cache is not supported")
+ }
+ }
+ var multiJobVolumes []datamodel.SessionName
+ for _, attachment := range summary.Attachments {
+ multiJobVolumes = append(multiJobVolumes, attachment)
+ }
+
+ request := datamodel.VolumeRequest{
+ MultiJob: false,
+ Caller: c.String("caller"),
+ TotalCapacityBytes: capacityBytes,
+ PoolName: datamodel.PoolName(pool),
+ Access: access,
+ Type: bufferType,
+ SwapBytes: swapBytes,
+ }
+ // TODO: must be a better way!
+ // ensure multi-job volumes are sorted, so locks are always taken in a consistent order and deadlocks are avoided
+ sort.Slice(multiJobVolumes, func(i, j int) bool {
+ return multiJobVolumes[i] < multiJobVolumes[j]
+ })
+ session := datamodel.Session{
+ Name: datamodel.SessionName(c.String("token")),
+ Owner: uint(c.Int("user")),
+ Group: uint(c.Int("group")),
+ CreatedAt: getNow(),
+ VolumeRequest: request,
+ MultiJobAttachments: multiJobVolumes,
+ StageInRequests: summary.DataIn,
+ StageOutRequests: summary.DataOut,
+ }
+ session.Paths = getPaths(session)
+ return d.session.CreateSession(session)
+}
+
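+// getPaths computes the DW_* environment variable paths exposed to the job:
+// private and/or striped per-job mounts, plus one path for each attached
+// multi-job (persistent) volume.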
+func getPaths(session datamodel.Session) map[string]string {
+ paths := make(map[string]string)
+ if !session.VolumeRequest.MultiJob {
+ if session.VolumeRequest.Access == datamodel.Private || session.VolumeRequest.Access == datamodel.PrivateAndStriped {
+ paths["DW_JOB_PRIVATE"] = fmt.Sprintf("/dac/%s_job_private", session.Name)
+ }
+ if session.VolumeRequest.Access == datamodel.Striped || session.VolumeRequest.Access == datamodel.PrivateAndStriped {
+ paths["DW_JOB_STRIPED"] = fmt.Sprintf("/dac/%s_job/global", session.Name)
+ }
+ }
+ for _, multiJobVolume := range session.MultiJobAttachments {
+ paths[fmt.Sprintf("DW_PERSISTENT_STRIPED_%s", multiJobVolume)] = fmt.Sprintf(
+ "/dac/%s_persistent_%s/global", session.Name, multiJobVolume)
+ }
+ return paths
+}
diff --git a/internal/pkg/dacctl/actions_impl/job_test.go b/internal/pkg/dacctl/actions_impl/job_test.go
new file mode 100644
index 00000000..7be98364
--- /dev/null
+++ b/internal/pkg/dacctl/actions_impl/job_test.go
@@ -0,0 +1,94 @@
+package actions_impl
+
+import (
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/mock_facade"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/mock_fileio"
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestDacctlActions_ValidateJob_BadInput(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ session := mock_facade.NewMockSession(mockCtrl)
+ disk := mock_fileio.NewMockDisk(mockCtrl)
+
+ lines := []string{`#DW bad cmd`}
+ disk.EXPECT().Lines("jobfile").Return(lines, nil)
+ actions := dacctlActions{session: session, disk: disk}
+ err := actions.ValidateJob(&mockCliContext{
+ strings: map[string]string{
+ "job": "jobfile",
+ },
+ })
+
+ assert.Equal(t, "unrecognised command: bad with arguments: [cmd]", err.Error())
+
+ err = actions.ValidateJob(&mockCliContext{})
+ assert.Equal(t, "Please provide these required parameters: job", err.Error())
+}
+
+func TestDacctlActions_CreatePerJobBuffer(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ session := mock_facade.NewMockSession(mockCtrl)
+ disk := mock_fileio.NewMockDisk(mockCtrl)
+
+ lines := []string{
+ `#DW jobdw capacity=4MiB access_mode=striped,private type=scratch`,
+ `#DW persistentdw name=myBBname2`,
+ `#DW persistentdw name=myBBname1`,
+ `#DW swap 4MiB`,
+ `#DW stage_in source=/global/cscratch1/filename1 destination=$DW_JOB_STRIPED/filename1 type=file`,
+ `#DW stage_in source=/global/cscratch1/filelist type=list`,
+ `#DW stage_out source=$DW_JOB_STRIPED/outdir destination=/global/scratch1/outdir type=directory`,
+ }
+ disk.EXPECT().Lines("jobfile").Return(lines, nil)
+ session.EXPECT().CreateSession(datamodel.Session{
+ Name: "token",
+ Owner: 1001,
+ Group: 1002,
+ CreatedAt: 123,
+ MultiJobAttachments: []datamodel.SessionName{"myBBname1", "myBBname2"},
+ StageInRequests: []datamodel.DataCopyRequest{
+ {
+ SourceType: datamodel.File,
+ Source: "/global/cscratch1/filename1",
+ Destination: "$DW_JOB_STRIPED/filename1",
+ },
+ {
+ SourceType: datamodel.List,
+ Source: "/global/cscratch1/filelist",
+ },
+ },
+ StageOutRequests: []datamodel.DataCopyRequest{
+ {
+ SourceType: datamodel.Directory,
+ Source: "$DW_JOB_STRIPED/outdir",
+ Destination: "/global/scratch1/outdir",
+ },
+ },
+ VolumeRequest: datamodel.VolumeRequest{
+ Caller: "caller",
+ PoolName: "pool1",
+ TotalCapacityBytes: 2147483648,
+ Access: datamodel.PrivateAndStriped,
+ Type: datamodel.Scratch,
+ SwapBytes: 4194304,
+ },
+ Paths: map[string]string{
+ "DW_JOB_PRIVATE": "/dac/token_job_private",
+ "DW_JOB_STRIPED": "/dac/token_job/global",
+ "DW_PERSISTENT_STRIPED_myBBname1": "/dac/token_persistent_myBBname1/global",
+ "DW_PERSISTENT_STRIPED_myBBname2": "/dac/token_persistent_myBBname2/global",
+ },
+ }).Return(nil)
+
+ fakeTime = 123
+ actions := dacctlActions{session: session, disk: disk}
+ err := actions.CreatePerJobBuffer(getMockCliContext(2))
+
+ assert.Nil(t, err)
+}
diff --git a/internal/pkg/dacctl/actions_impl/parsers/capacity.go b/internal/pkg/dacctl/actions_impl/parsers/capacity.go
new file mode 100644
index 00000000..08b1b01a
--- /dev/null
+++ b/internal/pkg/dacctl/actions_impl/parsers/capacity.go
@@ -0,0 +1,64 @@
+package parsers
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "math"
+ "strconv"
+ "strings"
+)
+
+// TODO: missing a few?
+var sizeSuffixMultiplier = map[string]uint{
+ "TiB": 1099511627776,
+ "TB": 1000000000000,
+ "GiB": 1073741824,
+ "GB": 1000000000,
+ "MiB": 1048576,
+ "MB": 1000000,
+}
+
+// TODO: test me!!
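+// GetBytes converts a value with a unit suffix into bytes, for example
+// GetBytes(2, "GiB") == 2147483648; it panics on an unrecognised unit.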
+func GetBytes(value uint, unit string) uint {
+ multiplier, ok := sizeSuffixMultiplier[unit]
+ if !ok {
+ log.Panicf("unrecognised unit")
+ }
+ return value * multiplier
+}
+
+// TODO: why not uint?
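+// ParseSize accepts raw bytes ("1048576") or a float with a unit suffix
+// ("10GiB", "1.5TB"), rounding fractional values up to the next whole byte.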
+func ParseSize(raw string) (int, error) {
+ intVal, err := strconv.Atoi(raw)
+ if err == nil {
+ // specified raw bytes
+ return intVal, nil
+ }
+ for suffix, multiplier := range sizeSuffixMultiplier {
+ if strings.HasSuffix(raw, suffix) {
+ rawInt := strings.TrimSpace(strings.TrimSuffix(raw, suffix))
+ floatVal, err := strconv.ParseFloat(rawInt, 64)
+ if err != nil {
+ return 0, err
+ }
+ floatBytes := floatVal * float64(multiplier)
+ return int(math.Ceil(floatBytes)), nil
+ }
+ }
+ return 0, fmt.Errorf("unable to parse size: %s", raw)
+}
+
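+// ParseCapacityBytes splits a "pool:capacity" request such as "pool1:2GiB"
+// into the pool name and the requested size in bytes.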
+func ParseCapacityBytes(raw string) (string, int, error) {
+ parts := strings.Split(raw, ":")
+ if len(parts) != 2 {
+ return "", 0, errors.New("must format capacity correctly and include pool")
+ }
+ pool := strings.TrimSpace(parts[0])
+ rawCapacity := strings.TrimSpace(parts[1])
+ sizeBytes, err := ParseSize(rawCapacity)
+ if err != nil {
+ return "", 0, err
+ }
+ return pool, sizeBytes, nil
+}
diff --git a/internal/pkg/dacctl/actions_impl/parsers/capacity_test.go b/internal/pkg/dacctl/actions_impl/parsers/capacity_test.go
new file mode 100644
index 00000000..b130621f
--- /dev/null
+++ b/internal/pkg/dacctl/actions_impl/parsers/capacity_test.go
@@ -0,0 +1,65 @@
+package parsers
+
+import (
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestParseSize(t *testing.T) {
+ size, err := ParseSize("10GiB")
+ assert.Nil(t, err)
+ assert.Equal(t, 10737418240, size)
+
+ size, err = ParseSize("10GB")
+ assert.Nil(t, err)
+ assert.Equal(t, 10000000000, size)
+
+ size, err = ParseSize("10 GB")
+ assert.Nil(t, err)
+ assert.Equal(t, 10000000000, size)
+
+ size, err = ParseSize("10B")
+ assert.Equal(t, "unable to parse size: 10B", err.Error())
+
+ size, err = ParseSize("10.1234567MB")
+ assert.Nil(t, err)
+ assert.Equal(t, 10123457, size)
+
+ size, err = ParseSize("1TiB")
+ assert.Nil(t, err)
+ assert.Equal(t, 1099511627776, size)
+
+ size, err = ParseSize("1TB")
+ assert.Nil(t, err)
+ assert.Equal(t, 1000000000000, size)
+
+ size, err = ParseSize("1MiB")
+ assert.Nil(t, err)
+ assert.Equal(t, 1048576, size)
+
+ size, err = ParseSize("AMiB")
+ assert.Equal(t, "strconv.ParseFloat: parsing \"A\": invalid syntax", err.Error())
+ assert.Equal(t, 0, size)
+}
+
+func TestParseCapacityBytes(t *testing.T) {
+ pool, size, err := ParseCapacityBytes("foo:1MB")
+ assert.Nil(t, err)
+ assert.Equal(t, "foo", pool)
+ assert.Equal(t, 1000000, size)
+
+ pool, size, err = ParseCapacityBytes("foo : 1 MB")
+ assert.Equal(t, "foo", pool)
+ assert.Equal(t, 1000000, size)
+ assert.Nil(t, err)
+
+ pool, size, err = ParseCapacityBytes("foo1MB")
+ assert.Equal(t, "must format capacity correctly and include pool", err.Error())
+ assert.Equal(t, "", pool)
+ assert.Equal(t, 0, size)
+
+ pool, size, err = ParseCapacityBytes("foo:1B")
+ assert.Equal(t, "unable to parse size: 1B", err.Error())
+ assert.Equal(t, "", pool)
+ assert.Equal(t, 0, size)
+}
diff --git a/internal/pkg/dacctl/actions_impl/parsers/hostnames.go b/internal/pkg/dacctl/actions_impl/parsers/hostnames.go
new file mode 100644
index 00000000..86e8fa84
--- /dev/null
+++ b/internal/pkg/dacctl/actions_impl/parsers/hostnames.go
@@ -0,0 +1,31 @@
+package parsers
+
+import (
+ "fmt"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/fileio"
+ "regexp"
+)
+
+// TODO: "-" is allowed so that uuid-style names pass validation
+var nameRegex = regexp.MustCompile("^[a-zA-Z0-9.-]+$")
+
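+// IsValidName accepts names made of letters, digits, dots and dashes only.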
+func IsValidName(name string) bool {
+ return nameRegex.Match([]byte(name))
+}
+
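+// GetHostnamesFromFile reads one hostname per line from the given file and
+// rejects the whole file if any line fails hostname validation.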
+func GetHostnamesFromFile(disk fileio.Disk, filename string) ([]string, error) {
+ computeHosts, err := disk.Lines(filename)
+ if err != nil {
+ return nil, err
+ }
+ var invalidHosts []string
+ for _, computeHost := range computeHosts {
+ if !IsValidName(computeHost) {
+ invalidHosts = append(invalidHosts, computeHost)
+ }
+ }
+ if len(invalidHosts) > 0 {
+ return nil, fmt.Errorf("invalid hostname in: %s", invalidHosts)
+ }
+ return computeHosts, nil
+}
diff --git a/internal/pkg/dacctl/actions_impl/parsers/hostnames_test.go b/internal/pkg/dacctl/actions_impl/parsers/hostnames_test.go
new file mode 100644
index 00000000..f6bfa94f
--- /dev/null
+++ b/internal/pkg/dacctl/actions_impl/parsers/hostnames_test.go
@@ -0,0 +1,54 @@
+package parsers
+
+import (
+ "errors"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/mock_fileio"
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestGetHostnamesFromFile(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ disk := mock_fileio.NewMockDisk(mockCtrl)
+
+ fakeHosts := []string{"test1", "test2"}
+ disk.EXPECT().Lines("file").Return(fakeHosts, nil)
+
+ hosts, err := GetHostnamesFromFile(disk, "file")
+ assert.Nil(t, err)
+ assert.Equal(t, fakeHosts, hosts)
+}
+
+func TestGetHostnamesFromFile_Empty(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ disk := mock_fileio.NewMockDisk(mockCtrl)
+
+ disk.EXPECT().Lines("file").Return(nil, nil)
+
+ hosts, err := GetHostnamesFromFile(disk, "file")
+ assert.Nil(t, err)
+ var fakeHosts []string
+ assert.Equal(t, fakeHosts, hosts)
+
+ fakeErr := errors.New("bob")
+ disk.EXPECT().Lines("file").Return(nil, fakeErr)
+ hosts, err = GetHostnamesFromFile(disk, "file")
+ assert.Equal(t, fakeHosts, hosts)
+ assert.Equal(t, "bob", err.Error())
+}
+
+func TestGetHostnamesFromFile_ErrorOnBadHostname(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ disk := mock_fileio.NewMockDisk(mockCtrl)
+
+ fakeHosts := []string{"Test", "test", "test1", "test2.com", "bad hostname", "foo/bar", ""}
+ disk.EXPECT().Lines("file").Return(fakeHosts, nil)
+
+ hosts, err := GetHostnamesFromFile(disk, "file")
+ assert.Nil(t, hosts)
+ assert.Equal(t, "invalid hostname in: [bad hostname foo/bar ]", err.Error())
+}
diff --git a/internal/pkg/dacctl/actions_impl/parsers/job.go b/internal/pkg/dacctl/actions_impl/parsers/job.go
new file mode 100644
index 00000000..a6526f40
--- /dev/null
+++ b/internal/pkg/dacctl/actions_impl/parsers/job.go
@@ -0,0 +1,229 @@
+package parsers
+
+import (
+ "fmt"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/fileio"
+ "log"
+ "strings"
+)
+
+type jobSummary struct {
+ PerJobBuffer *cmdPerJobBuffer
+ Swap *cmdAttachPerJobSwap
+ Attachments []datamodel.SessionName
+ DataIn []datamodel.DataCopyRequest
+ DataOut []datamodel.DataCopyRequest
+ // TODO: support create and destroy persistent?
+ //createPersistent *cmdCreatePersistent
+ //destroyPersistent *cmdDestroyPersistent
+}
+
+func ParseJobFile(disk fileio.Disk, filename string) (jobSummary, error) {
+ lines, err := disk.Lines(filename)
+ if err != nil {
+ return jobSummary{}, err
+ }
+ return getJobSummary(lines)
+}
+
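+// getJobSummary folds the parsed #DW/#BB commands into a summary, enforcing
+// at most one per-job buffer and at most one swap request per job file.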
+func getJobSummary(lines []string) (jobSummary, error) {
+ var summary jobSummary
+ jobCommands, err := parseJobRequest(lines)
+ if err != nil {
+ return summary, err
+ }
+
+ for _, cmd := range jobCommands {
+ switch c := cmd.(type) {
+ case cmdPerJobBuffer:
+ if summary.PerJobBuffer == nil {
+ summary.PerJobBuffer = &c
+ } else {
+ return jobSummary{}, fmt.Errorf("only one per job buffer allowed")
+ }
+ case cmdAttachPersistent:
+ summary.Attachments = append(summary.Attachments, datamodel.SessionName(c))
+ case cmdAttachPerJobSwap:
+ if summary.Swap != nil {
+ return jobSummary{}, fmt.Errorf("only one swap request allowed")
+ }
+ summary.Swap = &c
+ case cmdStageOutData:
+ summary.DataOut = append(summary.DataOut, datamodel.DataCopyRequest{
+ SourceType: c.SourceType,
+ Source: c.Source,
+ Destination: c.Destination,
+ })
+ case cmdStageInData:
+ summary.DataIn = append(summary.DataIn, datamodel.DataCopyRequest{
+ SourceType: c.SourceType,
+ Source: c.Source,
+ Destination: c.Destination,
+ })
+ }
+ }
+ return summary, nil
+}
+
+type jobCommand interface{}
+
+var stringToAccessMode = map[string]datamodel.AccessMode{
+ "": datamodel.Striped,
+ "striped": datamodel.Striped,
+ "private": datamodel.Private,
+ "private,striped": datamodel.PrivateAndStriped,
+ "striped,private": datamodel.PrivateAndStriped,
+}
+
+func accessModeFromString(raw string) datamodel.AccessMode {
+ return stringToAccessMode[strings.ToLower(raw)]
+}
+
+var stringToBufferType = map[string]datamodel.BufferType{
+ "": datamodel.Scratch,
+ "scratch": datamodel.Scratch,
+ "cache": datamodel.Cache,
+}
+
+type cmdCreatePersistent struct {
+ Name string
+ CapacityBytes int
+ AccessMode datamodel.AccessMode
+ BufferType datamodel.BufferType
+ GenericCmd bool
+}
+
+func bufferTypeFromString(raw string) datamodel.BufferType {
+ return stringToBufferType[strings.ToLower(raw)]
+}
+
+type cmdDestroyPersistent string
+
+type cmdAttachPersistent string
+
+type cmdPerJobBuffer struct {
+ CapacityBytes int
+ AccessMode datamodel.AccessMode
+ BufferType datamodel.BufferType
+ GenericCmd bool
+}
+
+type cmdAttachPerJobSwap struct {
+ SizeBytes int
+}
+
+var stringToStageType = map[string]datamodel.SourceType{
+ "directory": datamodel.Directory,
+ "file": datamodel.File,
+ "list": datamodel.List,
+}
+
+func sourceTypeFromString(raw string) datamodel.SourceType {
+ return stringToStageType[strings.ToLower(raw)]
+}
+
+type cmdStageInData datamodel.DataCopyRequest
+
+type cmdStageOutData datamodel.DataCopyRequest
+
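+// parseArgs turns ["key=value", ...] tokens into a map with lowercased keys.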
+func parseArgs(rawArgs []string) (map[string]string, error) {
+ args := make(map[string]string, len(rawArgs))
+ for _, arg := range rawArgs {
+ parts := strings.Split(arg, "=")
+ if len(parts) != 2 {
+ return args, fmt.Errorf("unable to parse arg: %s", arg)
+ }
+ args[strings.ToLower(parts[0])] = parts[1]
+ }
+ return args, nil
+}
+
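+// parseJobRequest parses #DW (native) and #BB (generic) burst buffer
+// directives, one command per line; non-directive lines are skipped and an
+// unknown command is an error.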
+func parseJobRequest(lines []string) ([]jobCommand, error) {
+ var commands []jobCommand
+ for _, line := range lines {
+ tokens := strings.Split(line, " ")
+ if len(tokens) < 3 {
+ if line != "" && line != "#!/bin/bash" {
+ log.Println("Skip badly formatted line:", line)
+ }
+ continue
+ }
+
+ cmdType := tokens[0]
+ cmd := tokens[1]
+ args := tokens[2:]
+
+ var isGeneric bool
+ switch cmdType {
+ case "#DW":
+ isGeneric = false
+ case "#BB":
+ isGeneric = true
+ default:
+ log.Println("unrecognised command type:", cmdType)
+ continue
+ }
+
+ argKeyPair, _ := parseArgs(args) // TODO deal with errors when not swap
+
+ var command jobCommand
+ switch cmd {
+ case "create_persistent":
+ size, err := ParseSize(argKeyPair["capacity"])
+ if err != nil {
+ log.Println(err)
+ return nil, err
+ }
+ command = cmdCreatePersistent{
+ Name: argKeyPair["name"],
+ CapacityBytes: size,
+ GenericCmd: isGeneric,
+ AccessMode: accessModeFromString(argKeyPair["access_mode"]),
+ BufferType: bufferTypeFromString(argKeyPair["type"]),
+ }
+ case "destroy_persistent":
+ command = cmdDestroyPersistent(argKeyPair["name"])
+ case "persistentdw":
+ command = cmdAttachPersistent(argKeyPair["name"])
+ case "jobdw":
+ size, err := ParseSize(argKeyPair["capacity"])
+ if err != nil {
+ log.Println(err)
+ return nil, err
+ }
+ command = cmdPerJobBuffer{
+ CapacityBytes: size,
+ GenericCmd: isGeneric,
+ AccessMode: accessModeFromString(argKeyPair["access_mode"]),
+ BufferType: bufferTypeFromString(argKeyPair["type"]),
+ }
+ case "swap":
+ if len(args) != 1 {
+ return nil, fmt.Errorf("unable to parse swap command: %s", line)
+ }
+ if size, err := ParseSize(args[0]); err != nil {
+ log.Println(err)
+ return nil, err
+ } else {
+ command = cmdAttachPerJobSwap{SizeBytes: size}
+ }
+ case "stage_in":
+ command = cmdStageInData{
+ Source: argKeyPair["source"],
+ Destination: argKeyPair["destination"],
+ SourceType: sourceTypeFromString(argKeyPair["type"]),
+ }
+ case "stage_out":
+ command = cmdStageOutData{
+ Source: argKeyPair["source"],
+ Destination: argKeyPair["destination"],
+ SourceType: sourceTypeFromString(argKeyPair["type"]),
+ }
+ default:
+ return nil, fmt.Errorf("unrecognised command: %s with arguments: %s", cmd, args)
+ }
+ commands = append(commands, command)
+ }
+ return commands, nil
+}
diff --git a/internal/pkg/dacctl/actions_impl/parsers/job_test.go b/internal/pkg/dacctl/actions_impl/parsers/job_test.go
new file mode 100644
index 00000000..d58e623a
--- /dev/null
+++ b/internal/pkg/dacctl/actions_impl/parsers/job_test.go
@@ -0,0 +1,126 @@
+package parsers
+
+import (
+ "errors"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/mock_fileio"
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ "log"
+ "testing"
+)
+
+func TestParseJobFile(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+
+ mockFileIO := mock_fileio.NewMockDisk(mockCtrl)
+ mockFileIO.EXPECT().Lines("testfile").Return(nil, errors.New("asdf"))
+
+ summary, err := ParseJobFile(mockFileIO, "testfile")
+ assert.Equal(t, jobSummary{}, summary)
+ assert.Equal(t, "asdf", err.Error())
+
+ mockFileIO = mock_fileio.NewMockDisk(mockCtrl)
+ mockFileIO.EXPECT().Lines("testfile").Return([]string{`#DW swap asdf`}, nil)
+
+ summary, err = ParseJobFile(mockFileIO, "testfile")
+ assert.Equal(t, jobSummary{}, summary)
+ assert.Equal(t, "unable to parse size: asdf", err.Error())
+}
+
+func TestParseJobRequest(t *testing.T) {
+ jobRequest := []string{
+ `#BB create_persistent name=myBBname capacity=100GB access_mode=striped type=scratch`,
+ `#BB create_persistent name=myBBname capacity=1073741824 access_mode=striped type=cache`,
+ `#BB destroy_persistent name=myBBname`,
+ `#DW persistentdw name=myBBname1`,
+ `#DW persistentdw name=myBBname2`,
+ `#DW persistentdw name=myBBname2`,
+ `#DW jobdw capacity=10GB access_mode=striped type=scratch`,
+ `#DW jobdw capacity=2TB access_mode=private type=scratch`,
+ `#DW jobdw capacity=4TiB access_mode=striped,private type=scratch`,
+ `#BB jobdw capacity=42GiB access_mode=ldbalance type=cache pfs=/global/scratch1/john`,
+ `#DW swap 3TiB`,
+ `#DW stage_in source=/global/cscratch1/filename1 destination=$DW_JOB_STRIPED/filename1 type=file`,
+ `#DW stage_out source=$DW_JOB_STRIPED/outdir destination=/global/scratch1/outdir type=directory`,
+ }
+ if cmds, err := parseJobRequest(jobRequest); err != nil {
+ t.Fatal(err)
+ } else {
+ assert.Equal(t, 13, len(cmds)) // TODO should check returned values!!
+ for _, cmd := range cmds {
+ log.Printf("Cmd: %T Args: %s\n", cmd, cmd)
+ }
+ }
+}
+
+func TestGetJobSummary(t *testing.T) {
+ lines := []string{
+ `#DW persistentdw name=myBBname1`,
+ `#DW persistentdw name=myBBname2`,
+ `#DW jobdw capacity=4MiB access_mode=striped,private type=scratch`,
+ `#DW swap 4MB`,
+ `#DW stage_in source=/global/cscratch1/filename1 destination=$DW_JOB_STRIPED/filename1 type=file`,
+ `#DW stage_in source=/global/cscratch1/filename2 destination=$DW_JOB_STRIPED/filename2 type=file`,
+ `#DW stage_out source=$DW_JOB_STRIPED/outdir destination=/global/scratch1/outdir type=directory`,
+ `skipping other lines that we`,
+ `don't understand`,
+ }
+ result, err := getJobSummary(lines)
+
+ assert.Nil(t, err)
+ assert.Equal(t, 2, len(result.DataIn))
+ assert.Equal(t, 1, len(result.DataOut))
+ assert.EqualValues(t, "/global/cscratch1/filename1", result.DataIn[0].Source)
+ assert.EqualValues(t, "/global/cscratch1/filename2", result.DataIn[1].Source)
+ assert.EqualValues(t, "$DW_JOB_STRIPED/outdir", result.DataOut[0].Source)
+
+ assert.Equal(t, 2, len(result.Attachments))
+ assert.Equal(t, datamodel.SessionName("myBBname1"), result.Attachments[0])
+ assert.Equal(t, datamodel.SessionName("myBBname2"), result.Attachments[1])
+
+ assert.Equal(t, 4194304, result.PerJobBuffer.CapacityBytes)
+ assert.Equal(t, 4000000, result.Swap.SizeBytes)
+}
+
+func TestGetJobSummary_Errors(t *testing.T) {
+ lines := []string{`#DW bad_command asdf=asdf`}
+ result, err := getJobSummary(lines)
+ assert.Equal(t, "unrecognised command: bad_command with arguments: [asdf=asdf]", err.Error())
+ assert.Nil(t, result.PerJobBuffer)
+
+ lines = []string{`#DW swap 1B asdf`}
+ result, err = getJobSummary(lines)
+ assert.Equal(t, "unable to parse swap command: #DW swap 1B asdf", err.Error())
+ assert.Nil(t, result.PerJobBuffer)
+
+ lines = []string{`#DW swap 1B`}
+ result, err = getJobSummary(lines)
+ assert.Equal(t, "unable to parse size: 1B", err.Error())
+ assert.Nil(t, result.PerJobBuffer)
+
+ lines = []string{`#DW jobdw capacity=4B access_mode=striped,private type=scratch`}
+ result, err = getJobSummary(lines)
+ assert.Equal(t, "unable to parse size: 4B", err.Error())
+ assert.Nil(t, result.PerJobBuffer)
+
+ lines = []string{`#BB create_persistent name=myBBname capacity=100B access_mode=striped type=scratch`}
+ result, err = getJobSummary(lines)
+ assert.Equal(t, "unable to parse size: 100B", err.Error())
+ assert.Nil(t, result.PerJobBuffer)
+
+ lines = []string{`#DW swap 1MB`, `#DW swap 2MB`}
+ result, err = getJobSummary(lines)
+ assert.Equal(t, "only one swap request allowed", err.Error())
+ assert.Nil(t, result.PerJobBuffer)
+
+ lines = []string{
+ `#DW jobdw capacity=4MiB access_mode=private type=scratch`,
+ `#DW jobdw capacity=5MiB access_mode=striped type=scratch`,
+ }
+ result, err = getJobSummary(lines)
+ assert.Equal(t, "only one per job buffer allowed", err.Error())
+ assert.Nil(t, result.PerJobBuffer)
+}
diff --git a/internal/pkg/dacctl/actions_impl/persistent.go b/internal/pkg/dacctl/actions_impl/persistent.go
new file mode 100644
index 00000000..182455f5
--- /dev/null
+++ b/internal/pkg/dacctl/actions_impl/persistent.go
@@ -0,0 +1,67 @@
+package actions_impl
+
+import (
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/dacctl"
+ parsers2 "github.com/RSE-Cambridge/data-acc/internal/pkg/dacctl/actions_impl/parsers"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "strings"
+ "time"
+)
+
+var stringToAccessMode = map[string]datamodel.AccessMode{
+ "": datamodel.Striped,
+ "striped": datamodel.Striped,
+ "private": datamodel.Private,
+ "private,striped": datamodel.PrivateAndStriped,
+ "striped,private": datamodel.PrivateAndStriped,
+}
+
+func accessModeFromString(raw string) datamodel.AccessMode {
+ return stringToAccessMode[strings.ToLower(raw)]
+}
+
+var stringToBufferType = map[string]datamodel.BufferType{
+ "": datamodel.Scratch,
+ "scratch": datamodel.Scratch,
+ "cache": datamodel.Cache,
+}
+
+func bufferTypeFromString(raw string) datamodel.BufferType {
+ return stringToBufferType[strings.ToLower(raw)]
+}
+
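+// fakeTime lets tests pin the timestamp returned by getNow; when left at
+// zero the current Unix time is used.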
+var fakeTime uint = 0
+
+func getNow() uint {
+ if fakeTime != 0 {
+ return fakeTime
+ }
+ return uint(time.Now().Unix())
+}
+
+func (d *dacctlActions) CreatePersistentBuffer(c dacctl.CliContext) error {
+ err := checkRequiredStrings(c, "token", "caller", "capacity", "access", "type")
+ if err != nil {
+ return err
+ }
+ pool, capacityBytes, err := parsers2.ParseCapacityBytes(c.String("capacity"))
+ if err != nil {
+ return err
+ }
+ request := datamodel.VolumeRequest{
+ MultiJob: true,
+ Caller: c.String("caller"),
+ TotalCapacityBytes: capacityBytes,
+ PoolName: datamodel.PoolName(pool),
+ Access: accessModeFromString(c.String("access")),
+ Type: bufferTypeFromString(c.String("type")),
+ }
+ session := datamodel.Session{
+ Name: datamodel.SessionName(c.String("token")),
+ VolumeRequest: request,
+ Owner: uint(c.Int("user")),
+ Group: uint(c.Int("group")),
+ CreatedAt: getNow(),
+ }
+ return d.session.CreateSession(session)
+}
diff --git a/internal/pkg/dacctl/actions_impl/persistent_test.go b/internal/pkg/dacctl/actions_impl/persistent_test.go
new file mode 100644
index 00000000..6c61e65e
--- /dev/null
+++ b/internal/pkg/dacctl/actions_impl/persistent_test.go
@@ -0,0 +1,34 @@
+package actions_impl
+
+import (
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/mock_facade"
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestDacctlActions_CreatePersistentBuffer(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ session := mock_facade.NewMockSession(mockCtrl)
+
+ session.EXPECT().CreateSession(datamodel.Session{
+ Name: "token",
+ Owner: 1001,
+ Group: 1002,
+ CreatedAt: 123,
+ VolumeRequest: datamodel.VolumeRequest{
+ MultiJob: true,
+ Caller: "caller",
+ PoolName: "pool1",
+ TotalCapacityBytes: 2147483648,
+ },
+ }).Return(nil)
+ fakeTime = 123
+
+ actions := dacctlActions{session: session}
+ err := actions.CreatePersistentBuffer(getMockCliContext(2))
+
+ assert.Nil(t, err)
+}
diff --git a/internal/pkg/dacctl/actions_impl/pools.go b/internal/pkg/dacctl/actions_impl/pools.go
new file mode 100644
index 00000000..1d5cac81
--- /dev/null
+++ b/internal/pkg/dacctl/actions_impl/pools.go
@@ -0,0 +1,25 @@
+package actions_impl
+
+import (
+ "encoding/json"
+ "log"
+)
+
+type pool struct {
+ Id string `json:"id"`
+ Units string `json:"units"`
+ Granularity uint `json:"granularity"`
+ Quantity uint `json:"quantity"`
+ Free uint `json:"free"`
+}
+
+type pools []pool
+
+func getPoolsAsString(list pools) string {
+ message := map[string]pools{"pools": list}
+ output, err := json.Marshal(message)
+ if err != nil {
+ log.Fatal(err.Error())
+ }
+ return string(output)
+}
diff --git a/internal/pkg/dacctl/actions_impl/sessions.go b/internal/pkg/dacctl/actions_impl/sessions.go
new file mode 100644
index 00000000..d3649161
--- /dev/null
+++ b/internal/pkg/dacctl/actions_impl/sessions.go
@@ -0,0 +1,24 @@
+package actions_impl
+
+import (
+ "encoding/json"
+ "log"
+)
+
+type session struct {
+ Id string `json:"id"`
+ Created uint `json:"created"`
+ Owner uint `json:"owner"`
+ Token string `json:"token"`
+}
+
+type sessions []session
+
+func sessionsToString(list []session) string {
+ message := map[string]sessions{"sessions": list}
+ output, err := json.Marshal(message)
+ if err != nil {
+ log.Fatal(err.Error())
+ }
+ return string(output)
+}
diff --git a/internal/pkg/dacctl/actions_impl/show.go b/internal/pkg/dacctl/actions_impl/show.go
new file mode 100644
index 00000000..f98e4525
--- /dev/null
+++ b/internal/pkg/dacctl/actions_impl/show.go
@@ -0,0 +1,104 @@
+package actions_impl
+
+import (
+ "fmt"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/dacctl"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+)
+
+func (d *dacctlActions) getSession(c dacctl.CliContext) (datamodel.Session, error) {
+ sessionName, err := d.getSessionName(c)
+ if err != nil {
+ return datamodel.Session{}, err
+ }
+ return d.session.GetSession(sessionName)
+}
+
+func (d *dacctlActions) RealSize(c dacctl.CliContext) (string, error) {
+ session, err := d.getSession(c)
+ if err != nil {
+ return "", err
+ }
+ return fmt.Sprintf(
+ `{"token":"%s", "capacity":%d, "units":"bytes"}`,
+ session.Name, session.ActualSizeBytes), nil
+}
+
+func (d *dacctlActions) Paths(c dacctl.CliContext) error {
+ err := checkRequiredStrings(c, "token", "pathfile")
+ if err != nil {
+ return err
+ }
+
+ session, err := d.getSession(c)
+ if err != nil {
+ return err
+ }
+
+ var paths []string
+ for key, value := range session.Paths {
+ paths = append(paths, fmt.Sprintf("%s=%s", key, value))
+ }
+ return d.disk.Write(c.String("pathfile"), paths)
+}
+
+func (d *dacctlActions) ShowInstances() (string, error) {
+ allSessions, err := d.session.GetAllSessions()
+ if err != nil {
+ return "", err
+ }
+
+ instances := instances{}
+ for _, session := range allSessions {
+ instances = append(instances, instance{
+ Id: string(session.Name),
+ Capacity: instanceCapacity{Bytes: uint(session.ActualSizeBytes)},
+ Links: instanceLinks{string(session.Name)},
+ })
+ }
+ return instancesToString(instances), nil
+}
+
+func (d *dacctlActions) ShowSessions() (string, error) {
+ allSessions, err := d.session.GetAllSessions()
+ if err != nil {
+ return "", err
+ }
+
+ sessions := sessions{}
+ for _, s := range allSessions {
+ sessions = append(sessions, session{
+ Id: string(s.Name),
+ Created: s.CreatedAt,
+ Owner: s.Owner,
+ Token: string(s.Name),
+ })
+ }
+ return sessionsToString(sessions), nil
+}
+
+func (d *dacctlActions) ListPools() (string, error) {
+ allPools, err := d.session.GetPools()
+ if err != nil {
+ return "", err
+ }
+
+ pools := pools{}
+ for _, regPool := range allPools {
+ free := len(regPool.AvailableBricks)
+ quantity := free + len(regPool.AllocatedBricks)
+ pools = append(pools, pool{
+ Id: string(regPool.Pool.Name),
+ Units: "bytes",
+ Granularity: regPool.Pool.GranularityBytes,
+ Quantity: uint(quantity),
+ Free: uint(free),
+ })
+ }
+ return getPoolsAsString(pools), nil
+}
+
+func (d *dacctlActions) ShowConfigurations() (string, error) {
+ // NOTE: Slurm doesn't read any of the output, so we don't send anything
+ return configurationToString(configurations{}), nil
+}
diff --git a/internal/pkg/dacctl/actions_impl/show_test.go b/internal/pkg/dacctl/actions_impl/show_test.go
new file mode 100644
index 00000000..b8ab91b4
--- /dev/null
+++ b/internal/pkg/dacctl/actions_impl/show_test.go
@@ -0,0 +1,189 @@
+package actions_impl
+
+import (
+ "errors"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/mock_facade"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/mock_fileio"
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestDacctlActions_RealSize(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ session := mock_facade.NewMockSession(mockCtrl)
+ session.EXPECT().GetSession(datamodel.SessionName("bar")).Return(datamodel.Session{
+ Name: datamodel.SessionName("bar"),
+ ActualSizeBytes: 123,
+ }, nil)
+
+ actions := dacctlActions{session: session}
+ output, err := actions.RealSize(&mockCliContext{
+ strings: map[string]string{"token": "bar"},
+ })
+
+ assert.Nil(t, err)
+ assert.Equal(t, `{"token":"bar", "capacity":123, "units":"bytes"}`, output)
+
+ _, err = actions.RealSize(&mockCliContext{})
+ assert.Equal(t, "Please provide these required parameters: token", err.Error())
+}
+
+func TestDacctlActions_Paths(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ session := mock_facade.NewMockSession(mockCtrl)
+ disk := mock_fileio.NewMockDisk(mockCtrl)
+
+ session.EXPECT().GetSession(datamodel.SessionName("bar")).Return(datamodel.Session{
+ Name: datamodel.SessionName("bar"),
+ Paths: map[string]string{
+ "foo1": "bar1",
+ },
+ }, nil)
+ disk.EXPECT().Write("paths", []string{"foo1=bar1"})
+
+ actions := dacctlActions{session: session, disk: disk}
+ err := actions.Paths(&mockCliContext{
+ strings: map[string]string{
+ "token": "bar",
+ "pathfile": "paths",
+ },
+ })
+
+ assert.Nil(t, err)
+
+ err = actions.Paths(&mockCliContext{})
+ assert.Equal(t, "Please provide these required parameters: token, pathfile", err.Error())
+
+ fakeError := errors.New("fake")
+ session.EXPECT().GetSession(datamodel.SessionName("bar")).Return(datamodel.Session{}, fakeError)
+ err = actions.Paths(&mockCliContext{
+ strings: map[string]string{
+ "token": "bar",
+ "pathfile": "paths",
+ },
+ })
+ assert.Equal(t, fakeError, err)
+}
+
+func TestDacctlActions_ShowInstances(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ session := mock_facade.NewMockSession(mockCtrl)
+ session.EXPECT().GetAllSessions().Return([]datamodel.Session{
+ {
+ Name: datamodel.SessionName("foo"),
+ ActualSizeBytes: 123,
+ },
+ {
+ Name: datamodel.SessionName("bar"),
+ ActualSizeBytes: 456,
+ },
+ }, nil)
+ actions := dacctlActions{session: session}
+
+ output, err := actions.ShowInstances()
+
+ assert.Nil(t, err)
+ expected := `{"instances":[{"id":"foo","capacity":{"bytes":123,"nodes":0},"links":{"session":"foo"}},{"id":"bar","capacity":{"bytes":456,"nodes":0},"links":{"session":"bar"}}]}`
+ assert.Equal(t, expected, output)
+
+ fakeErr := errors.New("fake")
+ session.EXPECT().GetAllSessions().Return(nil, fakeErr)
+ output, err = actions.ShowInstances()
+ assert.Equal(t, "", output)
+ assert.Equal(t, fakeErr, err)
+
+ session.EXPECT().GetAllSessions().Return(nil, nil)
+ output, err = actions.ShowInstances()
+ assert.Nil(t, err)
+ expected = `{"instances":[]}`
+ assert.Equal(t, expected, output)
+}
+
+func TestDacctlActions_ShowSessions(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ session := mock_facade.NewMockSession(mockCtrl)
+ session.EXPECT().GetAllSessions().Return([]datamodel.Session{
+ {
+ Name: datamodel.SessionName("foo"),
+ Owner: 42,
+ CreatedAt: 1234,
+ },
+ {
+ Name: datamodel.SessionName("bar"),
+ Owner: 43,
+ CreatedAt: 5678,
+ },
+ }, nil)
+ actions := dacctlActions{session: session}
+
+ output, err := actions.ShowSessions()
+
+ assert.Nil(t, err)
+ expected := `{"sessions":[{"id":"foo","created":1234,"owner":42,"token":"foo"},{"id":"bar","created":5678,"owner":43,"token":"bar"}]}`
+ assert.Equal(t, expected, output)
+
+ fakeErr := errors.New("fake")
+ session.EXPECT().GetAllSessions().Return(nil, fakeErr)
+ output, err = actions.ShowSessions()
+ assert.Equal(t, "", output)
+ assert.Equal(t, fakeErr, err)
+
+ session.EXPECT().GetAllSessions().Return(nil, nil)
+ output, err = actions.ShowSessions()
+ assert.Nil(t, err)
+ expected = `{"sessions":[]}`
+ assert.Equal(t, expected, output)
+}
+
+func TestDacctlActions_ListPools(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ session := mock_facade.NewMockSession(mockCtrl)
+ session.EXPECT().GetPools().Return([]datamodel.PoolInfo{
+ {
+ Pool: datamodel.Pool{
+ Name: "default",
+ GranularityBytes: 1024,
+ },
+ AllocatedBricks: []datamodel.BrickAllocation{
+ {
+ Brick: datamodel.Brick{Device: "sda"},
+ },
+ },
+ AvailableBricks: []datamodel.Brick{
+ {Device: "sdb"},
+ {Device: "sdc"},
+ },
+ },
+ }, nil)
+ actions := dacctlActions{session: session}
+
+ output, err := actions.ListPools()
+ assert.Nil(t, err)
+ expected := `{"pools":[{"id":"default","units":"bytes","granularity":1024,"quantity":3,"free":2}]}`
+ assert.Equal(t, expected, output)
+
+ session.EXPECT().GetPools().Return(nil, nil)
+ output, err = actions.ListPools()
+ assert.Nil(t, err)
+ assert.Equal(t, `{"pools":[]}`, output)
+
+ fakeErr := errors.New("fake")
+ session.EXPECT().GetPools().Return(nil, fakeErr)
+ output, err = actions.ListPools()
+ assert.Equal(t, fakeErr, err)
+ assert.Equal(t, "", output)
+}
+
+func TestDacctlActions_ShowConfigurations(t *testing.T) {
+ actions := dacctlActions{}
+ output, err := actions.ShowConfigurations()
+ assert.Nil(t, err)
+ assert.Equal(t, `{"configurations":[]}`, output)
+}
diff --git a/internal/pkg/dacctl/buffer.go b/internal/pkg/dacctl/buffer.go
deleted file mode 100644
index b1b8b472..00000000
--- a/internal/pkg/dacctl/buffer.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package dacctl
-
-import (
- "fmt"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/fileio"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/lifecycle"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "log"
- "strings"
- "time"
-)
-
-func DeleteBufferComponents(volumeRegistry registry.VolumeRegistry, poolRegistry registry.PoolRegistry,
- token string) error {
-
- job, err := volumeRegistry.Job(token)
- if err != nil {
- if strings.Contains(err.Error(), "unable to find any values for key") {
- log.Println("Unable to find job, must be deleted already or never created.")
- return nil
- }
- return err
- }
-
- if job.JobVolume != "" {
- volume, err := volumeRegistry.Volume(job.JobVolume)
- if err != nil {
- return err
- } else {
- vlm := lifecycle.NewVolumeLifecycleManager(volumeRegistry, poolRegistry, volume)
- if err := vlm.Delete(); err != nil {
- return err
- }
- }
- }
-
- return volumeRegistry.DeleteJob(token)
-}
-
-func CreatePerJobBuffer(volumeRegistry registry.VolumeRegistry, poolRegistry registry.PoolRegistry, disk fileio.Disk,
- token string, user int, group int, capacity string, caller string, jobFile string, nodeFile string) error {
- summary, err := ParseJobFile(disk, jobFile)
- if err != nil {
- return err
- }
-
- if nodeFile != "" {
- // TODO we could add this into the volume as a scheduling hint, when its available?
- log.Printf("Ignoring nodeFile in setup: %s", nodeFile)
- }
-
- pool, bricksRequired, err := getPoolAndBrickCount(poolRegistry, capacity)
- if err != nil {
- return err
- }
-
- createdAt := uint(time.Now().Unix())
- job := registry.Job{
- Name: token,
- Owner: uint(user),
- CreatedAt: createdAt,
- }
-
- var perJobVolume *registry.Volume
- if bricksRequired > 0 && summary.PerJobBuffer != nil {
- perJobVolume = getPerJobVolume(token, pool, bricksRequired,
- user, group, caller, createdAt, summary)
-
- err := volumeRegistry.AddVolume(*perJobVolume)
- if err != nil {
- return err
- }
-
- job.JobVolume = perJobVolume.Name
- }
-
- for _, attachment := range summary.Attachments {
- name := registry.VolumeName(attachment.Name)
- volume, err := volumeRegistry.Volume(name)
- if err != nil {
- return err
- }
- // TODO: need to check permissions and not just go for it!
- if !volume.MultiJob {
- return fmt.Errorf("%s is not a multijob volume", volume.Name)
- }
- job.MultiJobVolumes = append(job.MultiJobVolumes, volume.Name)
- }
-
- job.Paths = setPaths(perJobVolume, job)
-
- err = volumeRegistry.AddJob(job)
- if err != nil {
- if job.JobVolume != "" {
- volumeRegistry.DeleteVolume(job.JobVolume)
- }
- return err
- }
-
- if job.JobVolume != "" {
- volume, err := volumeRegistry.Volume(job.JobVolume)
- vlm := lifecycle.NewVolumeLifecycleManager(volumeRegistry, poolRegistry, volume)
- err = vlm.ProvisionBricks()
- if err != nil {
- log.Println("Bricks may be left behnd, not deleting volume due to: ", err)
- return err
- }
- }
- return nil
-}
-
-func setPaths(perJobVolume *registry.Volume, job registry.Job) map[string]string {
- paths := make(map[string]string)
- if perJobVolume != nil {
- if perJobVolume.AttachPrivateNamespace {
- paths["DW_JOB_PRIVATE"] = fmt.Sprintf("/dac/%s_job_private", job.Name)
- }
- if perJobVolume.AttachGlobalNamespace {
- paths["DW_JOB_STRIPED"] = fmt.Sprintf("/dac/%s_job/global", job.Name)
- }
- }
- for _, multiJobVolume := range job.MultiJobVolumes {
- paths[fmt.Sprintf("DW_PERSISTENT_STRIPED_%s", multiJobVolume)] = fmt.Sprintf(
- "/dac/%s_persistent_%s", job.Name, multiJobVolume)
- }
- return paths
-}
-
-func getPerJobVolume(token string, pool *registry.Pool, bricksRequired uint,
- user int, group int, caller string, createdAt uint, summary jobSummary) *registry.Volume {
- adjustedSizeGB := bricksRequired * pool.GranularityGB
- perJobVolume := registry.Volume{
- Name: registry.VolumeName(token),
- MultiJob: false,
- State: registry.Registered,
- Pool: pool.Name,
- SizeBricks: bricksRequired,
- SizeGB: adjustedSizeGB,
- JobName: token,
- Owner: uint(user),
- Group: uint(group),
- CreatedBy: caller,
- CreatedAt: createdAt,
- }
- jobBuffer := summary.PerJobBuffer
- if jobBuffer.BufferType == scratch &&
- (jobBuffer.AccessMode == private || jobBuffer.AccessMode == privateAndStriped) {
- perJobVolume.AttachPrivateNamespace = true
- }
- if jobBuffer.BufferType == scratch &&
- (jobBuffer.AccessMode == striped || jobBuffer.AccessMode == privateAndStriped) {
- perJobVolume.AttachGlobalNamespace = true
- }
- if jobBuffer.BufferType == scratch && summary.Swap != nil {
- perJobVolume.AttachAsSwapBytes = uint(summary.Swap.SizeBytes)
- }
- // TODO that can be many data_in and data_out, we only allow one relating to striped job buffer
- if summary.DataIn != nil && summary.DataIn.Source != "" {
- // TODO check destination includes striped buffer path?
- perJobVolume.StageIn.Source = summary.DataIn.Source
- perJobVolume.StageIn.Destination = summary.DataIn.Destination
- switch summary.DataIn.StageType {
- case file:
- perJobVolume.StageIn.SourceType = registry.File
- case directory:
- perJobVolume.StageIn.SourceType = registry.Directory
- }
- }
- if summary.DataOut != nil && summary.DataOut.Source != "" {
- // TODO check source includes striped buffer path?
- perJobVolume.StageOut.Source = summary.DataOut.Source
- perJobVolume.StageOut.Destination = summary.DataOut.Destination
- switch summary.DataOut.StageType {
- case file:
- perJobVolume.StageOut.SourceType = registry.File
- case directory:
- perJobVolume.StageOut.SourceType = registry.Directory
- }
- }
- return &perJobVolume
-}
diff --git a/internal/pkg/dacctl/buffer_test.go b/internal/pkg/dacctl/buffer_test.go
deleted file mode 100644
index 1a99f82b..00000000
--- a/internal/pkg/dacctl/buffer_test.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package dacctl
-
-import (
- "github.com/RSE-Cambridge/data-acc/internal/pkg/mocks"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "github.com/golang/mock/gomock"
- "github.com/stretchr/testify/assert"
- "testing"
-)
-
-func TestCreatePerJobBuffer(t *testing.T) {
- mockCtrl := gomock.NewController(t)
- mockVolReg := mocks.NewMockVolumeRegistry(mockCtrl)
- mockPoolReg := mocks.NewMockPoolRegistry(mockCtrl)
- mockDisk := mocks.NewMockDisk(mockCtrl)
- mockDisk.EXPECT().Lines("jobfile")
-
- err := CreatePerJobBuffer(mockVolReg, mockPoolReg, mockDisk, "token",
- 2, 2, "", "test", "jobfile", "nodefile")
- assert.Equal(t, "must format capacity correctly and include pool", err.Error())
-}
-
-func TestGetPerJobVolume(t *testing.T) {
- pool := registry.Pool{}
- summary := jobSummary{
- PerJobBuffer: &cmdPerJobBuffer{},
- }
- volume := getPerJobVolume("token", &pool, 3, 42, 42,
- "test", 20, summary)
- // TODO: lots more work to do here!
- assert.Equal(t, registry.VolumeName("token"), volume.Name)
- assert.True(t, volume.AttachGlobalNamespace)
- assert.False(t, volume.AttachPrivateNamespace)
-}
-
-func TestSetPaths(t *testing.T) {
- volume := registry.Volume{
- Name: "job1",
- UUID: "uuid1",
- AttachPrivateNamespace: true,
- AttachGlobalNamespace: true,
- }
- job := registry.Job{
- Name: "job1",
- MultiJobVolumes: []registry.VolumeName{
- registry.VolumeName("multi1"),
- registry.VolumeName("multi2"),
- },
- }
-
- paths := setPaths(&volume, job)
-
- assert.Equal(t, 4, len(paths))
- assert.Equal(t,
- "/dac/job1_job_private",
- paths["DW_JOB_PRIVATE"])
- assert.Equal(t,
- "/dac/job1_job/global",
- paths["DW_JOB_STRIPED"])
- assert.Equal(t,
- "/dac/job1_persistent_multi1",
- paths["DW_PERSISTENT_STRIPED_multi1"])
- assert.Equal(t,
- "/dac/job1_persistent_multi2",
- paths["DW_PERSISTENT_STRIPED_multi2"])
-}
diff --git a/internal/pkg/dacctl/interface.go b/internal/pkg/dacctl/interface.go
new file mode 100644
index 00000000..d4b58d7b
--- /dev/null
+++ b/internal/pkg/dacctl/interface.go
@@ -0,0 +1,25 @@
+package dacctl
+
+type CliContext interface {
+ String(name string) string
+ Int(name string) int
+ Bool(name string) bool
+}
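+
+// CliContext keeps the actions decoupled from any concrete CLI framework,
+// which makes them easy to unit test. As a minimal sketch (assuming the cmd
+// layer wraps something like urfave/cli, whose context exposes
+// String/Int/Bool lookups by flag name), an adapter could look like:
+//
+//   type cliContext struct{ ctx *cli.Context }
+//
+//   func (c cliContext) String(name string) string { return c.ctx.String(name) }
+//   func (c cliContext) Int(name string) int       { return c.ctx.Int(name) }
+//   func (c cliContext) Bool(name string) bool     { return c.ctx.Bool(name) }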
+
+type DacctlActions interface {
+ CreatePersistentBuffer(c CliContext) error
+ DeleteBuffer(c CliContext) error
+ CreatePerJobBuffer(c CliContext) error
+ ShowInstances() (string, error)
+ ShowSessions() (string, error)
+ ListPools() (string, error)
+ ShowConfigurations() (string, error)
+ ValidateJob(c CliContext) error
+ RealSize(c CliContext) (string, error)
+ DataIn(c CliContext) error
+ Paths(c CliContext) error
+ PreRun(c CliContext) error
+ PostRun(c CliContext) error
+ DataOut(c CliContext) error
+ GenerateAnsible(c CliContext) (string, error)
+}
diff --git a/internal/pkg/dacctl/job.go b/internal/pkg/dacctl/job.go
deleted file mode 100644
index 992e3efa..00000000
--- a/internal/pkg/dacctl/job.go
+++ /dev/null
@@ -1,299 +0,0 @@
-package dacctl
-
-import (
- "fmt"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/fileio"
- "log"
- "strconv"
- "strings"
-)
-
-type jobSummary struct {
- PerJobBuffer *cmdPerJobBuffer
- Swap *cmdAttachPerJobSwap
- Attachments []cmdAttachPersistent
- DataIn *cmdStageInData
- DataOut *cmdStageOutData
- //createPersistent *cmdCreatePersistent
- //destroyPersistent *cmdDestroyPersistent
-}
-
-func (s jobSummary) String() string {
- return toJson(s)
-}
-
-// Parse a given job file
-func ParseJobFile(disk fileio.Disk, filename string) (jobSummary, error) {
- lines, err := disk.Lines(filename)
- if err != nil {
- return jobSummary{}, err
- }
- return getJobSummary(lines)
-}
-
-func getJobSummary(lines []string) (jobSummary, error) {
- var summary jobSummary
- jobCommands, err := parseJobRequest(lines)
- if err != nil {
- return summary, err
- }
-
- for _, cmd := range jobCommands {
- switch c := cmd.(type) {
- case cmdPerJobBuffer:
- if summary.PerJobBuffer == nil {
- summary.PerJobBuffer = &c
- } else {
- return summary, fmt.Errorf("only one per job buffer allowed")
- }
- case cmdAttachPersistent:
- summary.Attachments = append(summary.Attachments, c)
- case cmdAttachPerJobSwap:
- if summary.Swap != nil {
- // TODO check amount isn't too big for per job buffer
- return summary, fmt.Errorf("only one swap request allowed")
- }
- summary.Swap = &c
- case cmdStageOutData:
- if summary.DataOut != nil {
- // TODO really should check if data out matches one of the requested buffers
- return summary, fmt.Errorf("only one per data out requested allowed")
- }
- summary.DataOut = &c
- case cmdStageInData:
- if summary.DataIn != nil {
- // TODO really should check if data in matches one of the requested buffers
- return summary, fmt.Errorf("only one per data in requested allowed")
- }
- summary.DataIn = &c
- default:
- // do nothing
- }
- }
- return summary, nil
-}
-
-type jobCommand interface{}
-
-type AccessMode int
-
-const (
- striped AccessMode = 0
- private = 1
- privateAndStriped = 2
-)
-
-var stringToAccessMode = map[string]AccessMode{
- "": striped,
- "striped": striped,
- "private": private,
- "private,striped": privateAndStriped,
- "striped,private": privateAndStriped,
-}
-
-func AccessModeFromString(raw string) AccessMode {
- return stringToAccessMode[strings.ToLower(raw)]
-}
-
-type BufferType int
-
-const (
- scratch BufferType = iota
- cache
-)
-
-var stringToBufferType = map[string]BufferType{
- "": scratch,
- "scratch": scratch,
- "cache": cache,
-}
-
-type cmdCreatePersistent struct {
- Name string
- CapacityBytes int
- AccessMode AccessMode
- BufferType BufferType
- GenericCmd bool
-}
-
-func BufferTypeFromString(raw string) BufferType {
- return stringToBufferType[strings.ToLower(raw)]
-}
-
-type cmdDestroyPersistent struct {
- Name string
-}
-
-type cmdAttachPersistent struct {
- Name string
-}
-
-type cmdPerJobBuffer struct {
- CapacityBytes int
- AccessMode AccessMode
- BufferType BufferType
- GenericCmd bool
-}
-
-type cmdAttachPerJobSwap struct {
- SizeBytes int
-}
-
-type StageType int
-
-const (
- directory StageType = iota
- file // TODO there is also list, but we ignore that for now
-)
-
-var stringToStageType = map[string]StageType{
- "": directory,
- "directory": directory,
- "file": file,
-}
-
-func stageTypeFromString(raw string) StageType {
- return stringToStageType[strings.ToLower(raw)]
-}
-
-type cmdStageInData struct {
- Source string
- Destination string
- StageType StageType
-}
-
-type cmdStageOutData struct {
- Source string
- Destination string
- StageType StageType
-}
-
-var sizeSuffixMulitiplyer = map[string]int{
- "TiB": 1099511627776,
- "TB": 1000000000000,
- "GiB": 1073741824,
- "GB": 1000000000,
- "MiB": 1048576,
- "MB": 1000000,
-}
-
-func parseSize(raw string) (int, error) {
- intVal, err := strconv.Atoi(raw)
- if err == nil {
- // specified raw bytes
- return intVal, nil
- }
- for suffix, multiplyer := range sizeSuffixMulitiplyer {
- if strings.HasSuffix(raw, suffix) {
- rawInt := strings.TrimSuffix(raw, suffix)
- intVal, err := strconv.Atoi(rawInt)
- if err != nil {
- return 0, err
- }
- return intVal * multiplyer, nil
- }
- }
- return 0, fmt.Errorf("unable to parse size: %s", raw)
-}
-
-func parseArgs(rawArgs []string) (map[string]string, error) {
- args := make(map[string]string, len(rawArgs))
- for _, arg := range rawArgs {
- parts := strings.Split(arg, "=")
- if len(parts) != 2 {
- return args, fmt.Errorf("unable to parse arg: %s", arg)
- }
- args[strings.ToLower(parts[0])] = parts[1]
- }
- return args, nil
-}
-
-func parseJobRequest(lines []string) ([]jobCommand, error) {
- var commands []jobCommand
- for _, line := range lines {
- tokens := strings.Split(line, " ")
- if len(tokens) < 3 {
- if line != "" && line != "#!/bin/bash" {
- log.Println("Skip badly formatted line:", line)
- }
- continue
- }
-
- cmdType := tokens[0]
- cmd := tokens[1]
- args := tokens[2:]
-
- var isGeneric bool
- switch cmdType {
- case "#DW":
- isGeneric = false
- case "#BB":
- isGeneric = true
- default:
- log.Println("unrecognised command type:", cmdType)
- continue
- }
-
- argKeyPair, _ := parseArgs(args) // TODO deal with errors when not swap
-
- var command jobCommand
- switch cmd {
- case "create_persistent":
- size, err := parseSize(argKeyPair["capacity"])
- if err != nil {
- log.Println(err)
- continue
- }
- command = cmdCreatePersistent{
- Name: argKeyPair["name"],
- CapacityBytes: size,
- GenericCmd: isGeneric,
- AccessMode: AccessModeFromString(argKeyPair["access_mode"]),
- BufferType: BufferTypeFromString(argKeyPair["type"]),
- }
- case "destroy_persistent":
- command = cmdDestroyPersistent{Name: argKeyPair["name"]}
- case "persistentdw":
- command = cmdAttachPersistent{Name: argKeyPair["name"]}
- case "jobdw":
- size, err := parseSize(argKeyPair["capacity"])
- if err != nil {
- log.Println(err)
- continue
- }
- command = cmdPerJobBuffer{
- CapacityBytes: size,
- GenericCmd: isGeneric,
- AccessMode: AccessModeFromString(argKeyPair["access_mode"]),
- BufferType: BufferTypeFromString(argKeyPair["type"]),
- }
- case "swap":
- if len(args) != 1 {
- log.Println("Unable to parse swap command:", line)
- }
- if size, err := parseSize(args[0]); err != nil {
- log.Println(err)
- continue
- } else {
- command = cmdAttachPerJobSwap{SizeBytes: size}
- }
- case "stage_in":
- command = cmdStageInData{
- Source: argKeyPair["source"],
- Destination: argKeyPair["destination"],
- StageType: stageTypeFromString(argKeyPair["type"]),
- }
- case "stage_out":
- command = cmdStageOutData{
- Source: argKeyPair["source"],
- Destination: argKeyPair["destination"],
- StageType: stageTypeFromString(argKeyPair["type"]),
- }
- default:
- log.Println("unrecognised command:", cmd, "with argument length", len(args))
- continue
- }
- commands = append(commands, command)
- }
- return commands, nil
-}
diff --git a/internal/pkg/dacctl/job_test.go b/internal/pkg/dacctl/job_test.go
deleted file mode 100644
index 3b590211..00000000
--- a/internal/pkg/dacctl/job_test.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package dacctl
-
-import (
- "github.com/stretchr/testify/assert"
- "log"
- "testing"
-)
-
-func TestParseJobRequest(t *testing.T) {
- jobRequest := []string{
- `#BB create_persistent name=myBBname capacity=100GB access_mode=striped type=scratch`,
- `#BB create_persistent name=myBBname capacity=1073741824 access_mode=striped type=cache`,
- `#BB destroy_persistent name=myBBname`,
- `#DW persistentdw name=myBBname1`,
- `#DW persistentdw name=myBBname2`,
- `#DW persistentdw name=myBBname2`,
- `#DW jobdw capacity=10GB access_mode=striped type=scratch`,
- `#DW jobdw capacity=2TB access_mode=private type=scratch`,
- `#DW jobdw capacity=4TiB access_mode=striped,private type=scratch`,
- `#BB jobdw capacity=42GiB access_mode=ldbalance type=cache pfs=/global/scratch1/john`,
- `#DW swap 3TiB`,
- `#DW stage_in source=/global/cscratch1/filename1 destination=$DW_JOB_STRIPED/filename1 type=file`,
- `#DW stage_out source=$DW_JOB_STRIPED/outdir destination=/global/scratch1/outdir type=directory`,
- }
- if cmds, err := parseJobRequest(jobRequest); err != nil {
- log.Fatal(err)
- } else {
- assert.Equal(t, 13, len(jobRequest)) // TODO should check returned values!!
- for _, cmd := range cmds {
- log.Printf("Cmd: %T Args: %s\n", cmd, cmd)
- }
- }
-}
-
-func TestGetJobSummary(t *testing.T) {
- lines := []string{
- `#DW persistentdw name=myBBname1`,
- `#DW persistentdw name=myBBname2`,
- `#DW jobdw capacity=4MiB access_mode=striped,private type=scratch`,
- `#DW swap 4MB`,
- `#DW stage_in source=/global/cscratch1/filename1 destination=$DW_JOB_STRIPED/filename1 type=file`,
- `#DW stage_out source=$DW_JOB_STRIPED/outdir destination=/global/scratch1/outdir type=directory`,
- }
- result, err := getJobSummary(lines)
-
- assert.Nil(t, err)
- assert.EqualValues(t, "/global/cscratch1/filename1", result.DataIn.Source)
- assert.EqualValues(t, "$DW_JOB_STRIPED/outdir", result.DataOut.Source)
-
- assert.Equal(t, 4194304, result.PerJobBuffer.CapacityBytes)
- assert.Equal(t, 4000000, result.Swap.SizeBytes)
-}
diff --git a/internal/pkg/dacctl/json.go b/internal/pkg/dacctl/json.go
deleted file mode 100644
index f79eaf44..00000000
--- a/internal/pkg/dacctl/json.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package dacctl
-
-import (
- "encoding/json"
- "log"
-)
-
-func toJson(message interface{}) string {
- b, error := json.Marshal(message)
- if error != nil {
- log.Fatal(error)
- }
- return string(b)
-}
diff --git a/internal/pkg/dacctl/persistent.go b/internal/pkg/dacctl/persistent.go
deleted file mode 100644
index 71dc1e79..00000000
--- a/internal/pkg/dacctl/persistent.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package dacctl
-
-import (
- "errors"
- "fmt"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/lifecycle"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "log"
- "math"
- "strings"
- "time"
-)
-
-type BufferRequest struct {
- Token string
- Caller string
- Capacity string
- User int
- Group int
- Access AccessMode
- Type BufferType
- Persistent bool
-}
-
-// Creates a persistent buffer.
-// If it works, we return the name of the buffer, otherwise an error is returned
-
-func parseCapacity(raw string) (string, int, error) {
- parts := strings.Split(raw, ":")
- if len(parts) != 2 {
- return "", 0, errors.New("must format capacity correctly and include pool")
- }
- pool := parts[0]
- rawCapacity := parts[1]
- sizeBytes, err := parseSize(rawCapacity)
- if err != nil {
- return "", 0, err
- }
- capacityInt := int(sizeBytes / bytesInGB)
- return pool, capacityInt, nil
-}
-
-func findPool(poolRegistry registry.PoolRegistry, poolName string) (pool *registry.Pool, err error) {
- pools, err := poolRegistry.Pools()
- if err != nil {
- return
- }
-
- for _, p := range pools {
- if p.Name == poolName {
- pool = &p
- }
- }
-
- if pool == nil {
- err = fmt.Errorf("unable to find pool: %s", poolName)
- return
- }
- return
-}
-
-func getPoolAndBrickCount(poolRegistry registry.PoolRegistry, capacity string) (pool *registry.Pool,
- bricksRequired uint, err error) {
-
- poolName, capacityGB, err := parseCapacity(capacity)
- if err != nil {
- return
- }
-
- pool, err = findPool(poolRegistry, poolName)
- if err != nil {
- return
- }
-
- bricksRequired = uint(math.Ceil(float64(capacityGB) / float64(pool.GranularityGB)))
- return
-}
-
-// TODO: ideally this would be private, if not for testing
-func CreateVolumesAndJobs(volReg registry.VolumeRegistry, poolRegistry registry.PoolRegistry,
- request BufferRequest) error {
-
- createdAt := uint(time.Now().Unix())
-
- pool, bricksRequired, err := getPoolAndBrickCount(poolRegistry, request.Capacity)
- if err != nil {
- return err
- }
- adjustedSizeGB := bricksRequired * pool.GranularityGB
-
- volume := registry.Volume{
- Name: registry.VolumeName(request.Token),
- JobName: request.Token,
- Owner: uint(request.User),
- CreatedAt: createdAt,
- CreatedBy: request.Caller,
- Group: uint(request.Group),
- SizeGB: adjustedSizeGB,
- SizeBricks: bricksRequired,
- Pool: pool.Name,
- State: registry.Registered,
- MultiJob: request.Persistent,
- }
- err = volReg.AddVolume(volume)
- if err != nil {
- return err
- }
-
- job := registry.Job{
- Name: request.Token,
- Owner: uint(request.User),
- CreatedAt: createdAt,
- JobVolume: volume.Name, // Even though its a persistent buffer, we add it here to ensure we delete buffer
- Paths: make(map[string]string),
- }
-
- err = volReg.AddJob(job)
- if err != nil {
- delErr := volReg.DeleteVolume(volume.Name)
- log.Println("volume deleted: ", delErr) // TODO: remove debug logs later, once understood
- return err
- }
-
- vlm := lifecycle.NewVolumeLifecycleManager(volReg, poolRegistry, volume)
- err = vlm.ProvisionBricks()
- if err != nil {
- log.Println("Bricks may be left behnd, not deleting volume due to: ", err)
- }
- return err
-}
diff --git a/internal/pkg/dacctl/pools.go b/internal/pkg/dacctl/pools.go
deleted file mode 100644
index b4a37e44..00000000
--- a/internal/pkg/dacctl/pools.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package dacctl
-
-import (
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
-)
-
-type pool struct {
- Id string `json:"id"`
- Units string `json:"units"`
- Granularity uint `json:"granularity"`
- Quantity uint `json:"quantity"`
- Free uint `json:"free"`
-}
-
-type pools []pool
-
-func (list *pools) String() string {
- message := map[string]pools{"pools": *list}
- return toJson(message)
-}
-
-const GbInBytes = 1073741824
-
-func GetPools(registry registry.PoolRegistry) (*pools, error) {
- pools := pools{}
- regPools, err := registry.Pools()
- if err != nil {
- return &pools, err
- }
-
- for _, regPool := range regPools {
- free := len(regPool.AvailableBricks)
- quantity := free + len(regPool.AllocatedBricks)
- pools = append(pools, pool{
- Id: regPool.Name,
- Units: "bytes",
- Granularity: regPool.GranularityGB * GbInBytes,
- Quantity: uint(quantity),
- Free: uint(free),
- })
- }
- return &pools, nil
-}
diff --git a/internal/pkg/dacctl/pools_test.go b/internal/pkg/dacctl/pools_test.go
deleted file mode 100644
index c47b644c..00000000
--- a/internal/pkg/dacctl/pools_test.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package dacctl
-
-import (
- "github.com/RSE-Cambridge/data-acc/internal/pkg/mocks"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "github.com/golang/mock/gomock"
- "github.com/stretchr/testify/assert"
- "testing"
-)
-
-func TestGetPools(t *testing.T) {
- mockCtrl := gomock.NewController(t)
- mockReg := mocks.NewMockPoolRegistry(mockCtrl)
- fakePools := func() ([]registry.Pool, error) {
- return []registry.Pool{{Name: "fake", GranularityGB: 1}}, nil
- }
- mockReg.EXPECT().Pools().DoAndReturn(fakePools)
-
- pools, _ := GetPools(mockReg)
- actual := pools.String()
- expected := `{"pools":[{"id":"fake","units":"bytes","granularity":1073741824,"quantity":0,"free":0}]}`
- assert.EqualValues(t, expected, actual)
-}
diff --git a/internal/pkg/dacctl/show.go b/internal/pkg/dacctl/show.go
deleted file mode 100644
index b0df92b4..00000000
--- a/internal/pkg/dacctl/show.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package dacctl
-
-import "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
-
-type instanceCapacity struct {
- Bytes uint `json:"bytes"`
- Nodes uint `json:"nodes"`
-}
-
-type instanceLinks struct {
- Session string `json:"session"`
-}
-
-type instance struct {
- Id string `json:"id"`
- Capacity instanceCapacity `json:"capacity"`
- Links instanceLinks `json:"links"`
-}
-
-type instances []instance
-
-func (list *instances) String() string {
- message := map[string]instances{"instances": *list}
- return toJson(message)
-}
-
-const bytesInGB = 1073741824
-
-func GetInstances(volRegistry registry.VolumeRegistry) (*instances, error) {
- instances := instances{}
- volumes, err := volRegistry.AllVolumes()
- if err != nil {
- // TODO... normally means there are no instances
- }
-
- for _, volume := range volumes {
- instances = append(instances, instance{
- Id: string(volume.Name),
- Capacity: instanceCapacity{Bytes: volume.SizeGB * bytesInGB, Nodes: volume.SizeBricks},
- Links: instanceLinks{volume.JobName},
- })
- }
- return &instances, nil
-}
-
-type session struct {
- Id string `json:"id"`
- Created uint `json:"created"`
- Owner uint `json:"owner"`
- Token string `json:"token"`
-}
-
-type sessions []session
-
-func (list *sessions) String() string {
- message := map[string]sessions{"sessions": *list}
- return toJson(message)
-}
-
-func GetSessions(volRegistry registry.VolumeRegistry) (*sessions, error) {
- jobs, err := volRegistry.Jobs()
- if err != nil {
- // TODO: error is usually about there not being any jobs
- jobs = []registry.Job{}
- }
- sessions := sessions{}
- for _, job := range jobs {
- sessions = append(sessions, session{
- Id: job.Name,
- Created: job.CreatedAt,
- Owner: job.Owner,
- Token: job.Name,
- })
- }
- return &sessions, nil
-}
-
-type configurations []string
-
-func (list *configurations) String() string {
- message := map[string]configurations{"configurations": *list}
- return toJson(message)
-}
-
-func GetConfigurations() *configurations {
- return &configurations{}
-}
diff --git a/internal/pkg/dacctl/show_test.go b/internal/pkg/dacctl/show_test.go
deleted file mode 100644
index 3b881e11..00000000
--- a/internal/pkg/dacctl/show_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package dacctl
-
-import (
- "github.com/RSE-Cambridge/data-acc/internal/pkg/mocks"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "github.com/golang/mock/gomock"
- "github.com/stretchr/testify/assert"
- "log"
- "testing"
-)
-
-func assertNewline(t *testing.T, actual string) {
- assert.EqualValues(t, "\n", actual[len(actual)-1:])
-}
-
-func TestGetInstances(t *testing.T) {
- mockCtrl := gomock.NewController(t)
- defer mockCtrl.Finish()
- mockReg := mocks.NewMockVolumeRegistry(mockCtrl)
- fakeGetVolumes := func() ([]registry.Volume, error) {
- return []registry.Volume{
- {Name: "fake1", Pool: "pool1", SizeGB: 2},
- }, nil
- }
- mockReg.EXPECT().AllVolumes().DoAndReturn(fakeGetVolumes)
-
- instances, err := GetInstances(mockReg)
- if err != nil {
- log.Fatal(err)
- }
- actual := instances.String()
-
- // TODO need to return sessions correctly... i.e. job
- expected := `{"instances":[{"id":"fake1","capacity":{"bytes":2147483648,"nodes":0},"links":{"session":""}}]}`
- assert.EqualValues(t, expected, actual)
-}
-
-func TestGetSessions(t *testing.T) {
- mockCtrl := gomock.NewController(t)
- defer mockCtrl.Finish()
- mockReg := mocks.NewMockVolumeRegistry(mockCtrl)
- mockJobs := func() ([]registry.Job, error) {
- return []registry.Job{{Name: "fake1", CreatedAt: 42, Owner: 1001}}, nil
-
- }
- mockReg.EXPECT().Jobs().DoAndReturn(mockJobs)
- sessions, err := GetSessions(mockReg)
- if err != nil {
- log.Fatal(err)
- }
- actual := sessions.String()
-
- expected := `{"sessions":[{"id":"fake1","created":42,"owner":1001,"token":"fake1"}]}`
- assert.EqualValues(t, expected, actual)
-}
-
-func TestGetConfigurations(t *testing.T) {
- actual := GetConfigurations().String()
- expected := `{"configurations":[]}`
- assert.EqualValues(t, expected, actual)
-}
diff --git a/internal/pkg/dacctl/workflow_impl/session.go b/internal/pkg/dacctl/workflow_impl/session.go
new file mode 100644
index 00000000..c43f4e04
--- /dev/null
+++ b/internal/pkg/dacctl/workflow_impl/session.go
@@ -0,0 +1,297 @@
+package workflow_impl
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/facade"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/filesystem"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/filesystem_impl"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/registry_impl"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/store"
+ "log"
+ "math"
+ "math/rand"
+ "time"
+)
+
+func NewSessionFacade(keystore store.Keystore) facade.Session {
+ return sessionFacade{
+ session: registry_impl.NewSessionRegistry(keystore),
+ actions: registry_impl.NewSessionActionsRegistry(keystore),
+ allocations: registry_impl.NewAllocationRegistry(keystore),
+ ansible: filesystem_impl.NewAnsible(),
+ }
+}
+
+type sessionFacade struct {
+ session registry.SessionRegistry
+ actions registry.SessionActions
+ allocations registry.AllocationRegistry
+ ansible filesystem.Ansible
+}
+
+func (s sessionFacade) submitJob(sessionName datamodel.SessionName, actionType datamodel.SessionActionType,
+ getSession func() (datamodel.Session, error)) error {
+	// 30 min timeout to acquire lock and send action
+ ctxt, cancelFunc := context.WithTimeout(context.Background(), time.Minute*30)
+ defer func() {
+ cancelFunc()
+ }()
+
+ sessionMutex, err := s.session.GetSessionMutex(sessionName)
+ if err != nil {
+ return fmt.Errorf("unable to get session mutex: %s due to: %s", sessionName, err)
+ }
+ err = sessionMutex.Lock(ctxt)
+ if err != nil {
+ return fmt.Errorf("unable to lock session mutex: %s due to: %s", sessionName, err)
+ }
+
+ session, err := getSession()
+ if err != nil {
+ unlockErr := sessionMutex.Unlock(context.TODO())
+ if unlockErr != nil {
+ log.Println("failed to drop mutex", unlockErr)
+ }
+ return err
+ }
+ if session.Name == "" {
+ // skip processing for this session
+		// e.g. it's a delete and the session has already been deleted
+ unlockErr := sessionMutex.Unlock(context.TODO())
+ if unlockErr != nil {
+ log.Println("failed to drop mutex", unlockErr)
+ }
+ return nil
+ }
+
+ // This will error out if the host is not currently up
+ sessionActions, err := s.actions.SendSessionAction(ctxt, actionType, session)
+	// Drop mutex regardless of whether we had an error or not
+ mutexErr := sessionMutex.Unlock(context.TODO())
+ if err != nil {
+ return err
+ }
+ if mutexErr != nil {
+ return mutexErr
+ }
+
+ // ensure we get one value, and the channel is closed
+ var finalResult *datamodel.SessionAction
+ for action := range sessionActions {
+ if finalResult != nil {
+ log.Panicf("unexpected mulitple actions")
+ }
+ finalResult = &action
+ }
+ if finalResult == nil {
+ log.Panicf("failed to get reponse")
+ }
+
+	// report any errors in the server response
+ if finalResult.Error != "" {
+ return errors.New(finalResult.Error)
+ }
+ return nil
+}
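+
+// A note on the contract above (a sketch of assumptions, not enforced here):
+// getSession runs while the session mutex is held, so the registry lookup
+// and any mutation happen atomically with respect to other dacctl calls,
+// and the channel from SendSessionAction is expected to deliver exactly one
+// response from the dacd host before being closed. Callers follow the pattern:
+//
+//   err := s.submitJob(sessionName, datamodel.SessionUnmount,
+//       func() (datamodel.Session, error) {
+//           return s.session.GetSession(sessionName)
+//       })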
+
+func (s sessionFacade) CreateSession(session datamodel.Session) error {
+ err := s.validateSession(session)
+ if err != nil {
+ return err
+ }
+
+ return s.submitJob(session.Name, datamodel.SessionCreateFilesystem,
+ func() (datamodel.Session, error) {
+ // Allocate bricks, and choose brick host server
+ session, err := s.doAllocationAndWriteSession(session)
+ if err != nil {
+ return session, err
+ }
+ if session.ActualSizeBytes == 0 {
+ // Skip creating an empty filesystem
+ return datamodel.Session{}, nil
+ }
+ return session, nil
+ })
+}
+
+func (s sessionFacade) validateSession(session datamodel.Session) error {
+ _, err := s.allocations.GetPool(session.VolumeRequest.PoolName)
+ if err != nil {
+ return fmt.Errorf("invalid session, unable to find pool %s", session.VolumeRequest.PoolName)
+ }
+ // TODO: validate multi-job volumes exist
+ // TODO: check for multi-job restrictions, etc?
+ return nil
+}
+
+func (s sessionFacade) doAllocationAndWriteSession(session datamodel.Session) (datamodel.Session, error) {
+ if session.VolumeRequest.TotalCapacityBytes > 0 {
+ allocationMutex, err := s.allocations.GetAllocationMutex()
+ if err != nil {
+ return session, err
+ }
+
+ err = allocationMutex.Lock(context.TODO())
+ if err != nil {
+ return session, err
+ }
+ defer allocationMutex.Unlock(context.TODO())
+
+ // Write allocations before creating the session
+ actualSizeBytes, chosenBricks, err := s.getBricks(session.VolumeRequest.PoolName, session.VolumeRequest.TotalCapacityBytes)
+ if err != nil {
+ return session, fmt.Errorf("can't allocate for session: %s due to %s", session.Name, err)
+ }
+
+ session.ActualSizeBytes = actualSizeBytes
+ session.AllocatedBricks = chosenBricks
+ session.PrimaryBrickHost = chosenBricks[0].BrickHostName
+ } else {
+ // Pick a random alive host to be the PrimaryBrickHost anyway
+ pools, err := s.allocations.GetAllPoolInfos()
+ if err != nil {
+ return session, err
+ }
+ if len(pools) == 0 {
+ return session, fmt.Errorf("unable to find any pools")
+ }
+		// TODO: need to pick the default pool, but right now there is only one
+ poolInfo := pools[0]
+ bricks := pickBricks(1, poolInfo)
+ session.PrimaryBrickHost = bricks[0].BrickHostName
+ }
+
+ // Store initial version of session
+ // returned session will have updated revision info
+ return s.session.CreateSession(session)
+}
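+
+// Holding the allocation mutex serialises brick selection across concurrent
+// session creates, so two sessions cannot be allocated the same brick. A
+// PrimaryBrickHost is chosen even for zero-size sessions, so that one dacd
+// instance can still pick up the session's lifecycle actions.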
+
+func (s sessionFacade) getBricks(poolName datamodel.PoolName, bytes int) (int, []datamodel.Brick, error) {
+ pool, err := s.allocations.GetPoolInfo(poolName)
+ if err != nil {
+ return 0, nil, err
+ }
+
+ bricksRequired := int(math.Ceil(float64(bytes) / float64(pool.Pool.GranularityBytes)))
+ actualSize := bricksRequired * int(pool.Pool.GranularityBytes)
+
+ bricks := pickBricks(bricksRequired, pool)
+ if len(bricks) != bricksRequired {
+ return 0, nil, fmt.Errorf(
+ "unable to get number of requested bricks (%d) for given pool (%s)",
+ bricksRequired, pool.Pool.Name)
+ }
+ return actualSize, bricks, nil
+}
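+
+// Worked example (values assumed for illustration): with a pool granularity
+// of 1024 bytes, a request for 2048 bytes needs ceil(2048/1024) = 2 bricks
+// and actualSize stays 2048, while a request for 1500 bytes also rounds up
+// to 2 bricks, giving actualSize = 2048.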
+
+func pickBricks(bricksRequired int, poolInfo datamodel.PoolInfo) []datamodel.Brick {
+ // pick some of the available bricks
+ s := rand.NewSource(time.Now().Unix())
+ r := rand.New(s) // initialize local pseudorandom generator
+
+ var chosenBricks []datamodel.Brick
+ randomWalk := r.Perm(len(poolInfo.AvailableBricks))
+ for _, i := range randomWalk {
+ candidateBrick := poolInfo.AvailableBricks[i]
+
+		// TODO: shouldn't the random walk mean this check isn't needed?
+ goodCandidate := true
+ for _, brick := range chosenBricks {
+ if brick == candidateBrick {
+ goodCandidate = false
+ break
+ }
+ }
+ if goodCandidate {
+ chosenBricks = append(chosenBricks, candidateBrick)
+ }
+ if len(chosenBricks) >= bricksRequired {
+ break
+ }
+ }
+ return chosenBricks
+}
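+
+// rand.Perm visits each index at most once, so the duplicate check above
+// only fires if AvailableBricks itself contains repeated entries; it is
+// effectively a cheap guard against bad pool data (see the TODO above).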
+
+func (s sessionFacade) DeleteSession(sessionName datamodel.SessionName, hurry bool) error {
+ return s.submitJob(sessionName, datamodel.SessionDelete,
+ func() (datamodel.Session, error) {
+ session, err := s.session.GetSession(sessionName)
+ if err != nil {
+ log.Println("Unable to find session, skipping delete:", sessionName)
+ return session, nil
+ }
+
+ if session.Status.DeleteRequested {
+ // TODO: is there anything we can do about this?
+ log.Println("Warning, delete already called")
+ }
+
+			// Record we want this deleted, in case the host is not alive;
+			// it can then be deleted when the host is next started
+ session.Status.DeleteRequested = true
+ session.Status.DeleteSkipCopyDataOut = hurry
+ return s.session.UpdateSession(session)
+ })
+}
+
+func (s sessionFacade) CopyDataIn(sessionName datamodel.SessionName) error {
+ return s.submitJob(sessionName, datamodel.SessionCopyDataIn,
+ func() (datamodel.Session, error) {
+ return s.session.GetSession(sessionName)
+ })
+}
+
+func (s sessionFacade) Mount(sessionName datamodel.SessionName, computeNodes []string, loginNodes []string) error {
+ return s.submitJob(sessionName, datamodel.SessionMount,
+ func() (datamodel.Session, error) {
+ session, err := s.session.GetSession(sessionName)
+ if err != nil {
+ log.Println("Unable to find session we want to mount:", sessionName)
+ return session, err
+ }
+
+ // TODO: what about the login nodes? what do we want to do there?
+ session.RequestedAttachHosts = computeNodes
+ return s.session.UpdateSession(session)
+ })
+}
+
+func (s sessionFacade) Unmount(sessionName datamodel.SessionName) error {
+ return s.submitJob(sessionName, datamodel.SessionUnmount,
+ func() (datamodel.Session, error) {
+ return s.session.GetSession(sessionName)
+ })
+}
+
+func (s sessionFacade) CopyDataOut(sessionName datamodel.SessionName) error {
+ return s.submitJob(sessionName, datamodel.SessionCopyDataOut,
+ func() (datamodel.Session, error) {
+ return s.session.GetSession(sessionName)
+ })
+}
+
+func (s sessionFacade) GetPools() ([]datamodel.PoolInfo, error) {
+ return s.allocations.GetAllPoolInfos()
+}
+
+func (s sessionFacade) GetSession(sessionName datamodel.SessionName) (datamodel.Session, error) {
+ return s.session.GetSession(sessionName)
+}
+
+func (s sessionFacade) GetAllSessions() ([]datamodel.Session, error) {
+ return s.session.GetAllSessions()
+}
+
+func (s sessionFacade) GenerateAnsible(sessionName datamodel.SessionName) (string, error) {
+ session, err := s.session.GetSession(sessionName)
+ if err != nil {
+ log.Println("Unable to find session we want to mount:", sessionName)
+ return "", err
+ }
+ return s.ansible.CreateEnvironment(session)
+}
diff --git a/internal/pkg/dacctl/workflow_impl/session_test.go b/internal/pkg/dacctl/workflow_impl/session_test.go
new file mode 100644
index 00000000..fccc9cd9
--- /dev/null
+++ b/internal/pkg/dacctl/workflow_impl/session_test.go
@@ -0,0 +1,176 @@
+package workflow_impl
+
+import (
+ "context"
+ "errors"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/mock_registry"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/mock_store"
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestSessionFacade_CreateSession_NoBricks(t *testing.T) {
+ initialSession := datamodel.Session{
+ Name: "foo",
+ VolumeRequest: datamodel.VolumeRequest{
+ PoolName: datamodel.PoolName("pool1"),
+ TotalCapacityBytes: 0,
+ },
+ }
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ actions := mock_registry.NewMockSessionActions(mockCtrl)
+ sessionRegistry := mock_registry.NewMockSessionRegistry(mockCtrl)
+ allocations := mock_registry.NewMockAllocationRegistry(mockCtrl)
+ facade := sessionFacade{
+ session: sessionRegistry, actions: actions, allocations: allocations,
+ }
+
+ allocations.EXPECT().GetPool(datamodel.PoolName("pool1")).Return(datamodel.Pool{Name: "pool1"}, nil)
+ sessionMutex := mock_store.NewMockMutex(mockCtrl)
+ sessionRegistry.EXPECT().GetSessionMutex(initialSession.Name).Return(sessionMutex, nil)
+ sessionMutex.EXPECT().Lock(gomock.Any())
+ brickList := []datamodel.Brick{{Device: "sda", BrickHostName: datamodel.BrickHostName("host1")}}
+ allocations.EXPECT().GetAllPoolInfos().Return([]datamodel.PoolInfo{{AvailableBricks: brickList}}, nil)
+ initialSession.PrimaryBrickHost = "host1"
+ sessionRegistry.EXPECT().CreateSession(initialSession).Return(initialSession, nil)
+ sessionMutex.EXPECT().Unlock(context.TODO())
+
+ err := facade.CreateSession(initialSession)
+
+ assert.Nil(t, err)
+}
+
+func TestSessionFacade_CreateSession_WithBricks_AllocationError(t *testing.T) {
+ initialSession := datamodel.Session{
+ Name: "foo",
+ VolumeRequest: datamodel.VolumeRequest{
+ PoolName: datamodel.PoolName("pool1"),
+ TotalCapacityBytes: 2048,
+ },
+ }
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ actions := mock_registry.NewMockSessionActions(mockCtrl)
+ sessionRegistry := mock_registry.NewMockSessionRegistry(mockCtrl)
+ allocations := mock_registry.NewMockAllocationRegistry(mockCtrl)
+ facade := sessionFacade{
+ session: sessionRegistry, actions: actions, allocations: allocations,
+ }
+
+ allocations.EXPECT().GetPool(datamodel.PoolName("pool1")).Return(datamodel.Pool{Name: "pool1"}, nil)
+ sessionMutex := mock_store.NewMockMutex(mockCtrl)
+ sessionRegistry.EXPECT().GetSessionMutex(initialSession.Name).Return(sessionMutex, nil)
+ sessionMutex.EXPECT().Lock(gomock.Any())
+ allocationMutex := mock_store.NewMockMutex(mockCtrl)
+ allocations.EXPECT().GetAllocationMutex().Return(allocationMutex, nil)
+ allocationMutex.EXPECT().Lock(context.TODO())
+ allocations.EXPECT().GetPoolInfo(initialSession.VolumeRequest.PoolName).Return(datamodel.PoolInfo{
+ Pool: datamodel.Pool{
+ Name: "pool1", GranularityBytes: 1024,
+ },
+ }, nil)
+ allocationMutex.EXPECT().Unlock(context.TODO())
+ sessionMutex.EXPECT().Unlock(context.TODO())
+
+ err := facade.CreateSession(initialSession)
+
+ assert.Equal(t, "can't allocate for session: foo due to unable to get number of requested bricks (2) for given pool (pool1)", err.Error())
+}
+
+func TestSessionFacade_CreateSession_WithBricks_CreateSessionError(t *testing.T) {
+ initialSession := datamodel.Session{
+ Name: "foo",
+ VolumeRequest: datamodel.VolumeRequest{
+ PoolName: datamodel.PoolName("pool1"),
+ TotalCapacityBytes: 1024,
+ },
+ }
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ actions := mock_registry.NewMockSessionActions(mockCtrl)
+ sessionRegistry := mock_registry.NewMockSessionRegistry(mockCtrl)
+ allocations := mock_registry.NewMockAllocationRegistry(mockCtrl)
+ facade := sessionFacade{
+ session: sessionRegistry, actions: actions, allocations: allocations,
+ }
+
+ allocations.EXPECT().GetPool(datamodel.PoolName("pool1")).Return(datamodel.Pool{Name: "pool1"}, nil)
+ sessionMutex := mock_store.NewMockMutex(mockCtrl)
+ sessionRegistry.EXPECT().GetSessionMutex(initialSession.Name).Return(sessionMutex, nil)
+ sessionMutex.EXPECT().Lock(gomock.Any())
+ allocationMutex := mock_store.NewMockMutex(mockCtrl)
+ allocations.EXPECT().GetAllocationMutex().Return(allocationMutex, nil)
+ allocationMutex.EXPECT().Lock(context.TODO())
+ brickList := []datamodel.Brick{{Device: "sda", BrickHostName: datamodel.BrickHostName("host1")}}
+ allocations.EXPECT().GetPoolInfo(initialSession.VolumeRequest.PoolName).Return(datamodel.PoolInfo{
+ Pool: datamodel.Pool{
+ Name: "pool1", GranularityBytes: 1024,
+ },
+ AvailableBricks: brickList,
+ }, nil)
+ updatedSession := datamodel.Session{
+ Name: "foo",
+ VolumeRequest: datamodel.VolumeRequest{
+ PoolName: datamodel.PoolName("pool1"),
+ TotalCapacityBytes: 1024,
+ },
+ ActualSizeBytes: 1024,
+ AllocatedBricks: brickList,
+ PrimaryBrickHost: brickList[0].BrickHostName,
+ }
+ returnedSession := datamodel.Session{
+ Name: "foo",
+ ActualSizeBytes: 1024,
+ }
+ sessionRegistry.EXPECT().CreateSession(updatedSession).Return(returnedSession, nil)
+ allocationMutex.EXPECT().Unlock(context.TODO())
+ fakeErr := errors.New("fake")
+ actionChan := make(chan datamodel.SessionAction)
+ actions.EXPECT().SendSessionAction(gomock.Any(), datamodel.SessionCreateFilesystem, returnedSession).Return(actionChan, nil)
+ sessionMutex.EXPECT().Unlock(context.TODO())
+ go func() {
+ actionChan <- datamodel.SessionAction{Error: fakeErr.Error()}
+ close(actionChan)
+ }()
+
+ err := facade.CreateSession(initialSession)
+
+ assert.Equal(t, fakeErr, err)
+}
+
+func TestSessionFacade_DeleteSession(t *testing.T) {
+ sessionName := datamodel.SessionName("foo")
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ actions := mock_registry.NewMockSessionActions(mockCtrl)
+ sessionRegistry := mock_registry.NewMockSessionRegistry(mockCtrl)
+ facade := sessionFacade{session: sessionRegistry, actions: actions}
+ sessionMutex := mock_store.NewMockMutex(mockCtrl)
+ sessionRegistry.EXPECT().GetSessionMutex(sessionName).Return(sessionMutex, nil)
+ sessionMutex.EXPECT().Lock(gomock.Any())
+ initialSession := datamodel.Session{Name: "foo"}
+ sessionRegistry.EXPECT().GetSession(sessionName).Return(initialSession, nil)
+ updatedSession := datamodel.Session{
+ Name: "foo",
+ Status: datamodel.SessionStatus{
+ DeleteRequested: true,
+ DeleteSkipCopyDataOut: true,
+ },
+ }
+ sessionRegistry.EXPECT().UpdateSession(updatedSession).Return(initialSession, nil)
+ actionChan := make(chan datamodel.SessionAction)
+ actions.EXPECT().SendSessionAction(gomock.Any(), datamodel.SessionDelete, initialSession).Return(actionChan, nil)
+ sessionMutex.EXPECT().Unlock(context.TODO())
+ fakeErr := errors.New("fake")
+ go func() {
+ actionChan <- datamodel.SessionAction{Error: fakeErr.Error()}
+ close(actionChan)
+ }()
+
+ err := facade.DeleteSession(sessionName, true)
+
+ assert.Equal(t, fakeErr, err)
+}
diff --git a/internal/pkg/dacd/brick_manager_impl/brick_manager.go b/internal/pkg/dacd/brick_manager_impl/brick_manager.go
new file mode 100644
index 00000000..1cd4e68b
--- /dev/null
+++ b/internal/pkg/dacd/brick_manager_impl/brick_manager.go
@@ -0,0 +1,122 @@
+package brick_manager_impl
+
+import (
+ "context"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/config"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/dacd"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/facade"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/registry_impl"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/store"
+ "log"
+)
+
+func NewBrickManager(keystore store.Keystore) dacd.BrickManager {
+ return &brickManager{
+ config: config.GetBrickManagerConfig(config.DefaultEnv),
+ brickRegistry: registry_impl.NewBrickHostRegistry(keystore),
+ sessionRegistry: registry_impl.NewSessionRegistry(keystore),
+ sessionActions: registry_impl.NewSessionActionsRegistry(keystore),
+ sessionActionHandler: NewSessionActionHandler(keystore),
+ }
+}
+
+type brickManager struct {
+ config config.BrickManagerConfig
+ brickRegistry registry.BrickHostRegistry
+ sessionRegistry registry.SessionRegistry
+ sessionActions registry.SessionActions
+ sessionActionHandler facade.SessionActionHandler
+}
+
+func (bm *brickManager) Hostname() string {
+ return string(bm.config.BrickHostName)
+}
+
+func (bm *brickManager) Startup() {
+	// TODO: should we hold the allocation mutex until we have started the keepalive?
+ // TODO: add a drain configuration?
+
+ err := bm.brickRegistry.UpdateBrickHost(getBrickHost(bm.config))
+ if err != nil {
+ log.Panicf("failed to update brick host: %s", err)
+ }
+
+	// If we are enabled, this includes new create session requests
+	events, err := bm.sessionActions.GetSessionActionRequests(context.TODO(), bm.config.BrickHostName)
+	if err != nil {
+		log.Panicf("failed to get session action requests: %s", err)
+	}
+
+ // Assume we got restarted, first try to finish all pending actions
+ bm.completePendingActions()
+
+	// If we were restarted, likely no one is listening for pending actions any more,
+	// so don't worry that the above pending actions may have failed due to sessions not being restored first
+ bm.restoreSessions()
+
+ // Tell everyone we are listening
+ err = bm.brickRegistry.KeepAliveHost(context.TODO(), bm.config.BrickHostName)
+ if err != nil {
+ log.Panicf("failed to start keep alive host: %s", err)
+ }
+
+	// Process any events, now that others know we are alive
+ go func() {
+ for event := range events {
+ // TODO: we could limit the number of workers
+ go bm.sessionActionHandler.ProcessSessionAction(event)
+ }
+ log.Println("ERROR: stopped waiting for new Session Actions")
+ }()
+}
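+
+// The ordering in Startup is deliberate: the watch for new action requests
+// opens before anything else so no request is missed, pending actions are
+// replayed and sessions restored before the keepalive key is written, and
+// events are only consumed once the host is advertised as alive.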
+
+func (bm *brickManager) completePendingActions() {
+	// Assume the service has been restarted, let's
+ // retry any actions that haven't been completed
+ // making the assumption that actions are idempotent
+ actions, err := bm.sessionActions.GetOutstandingSessionActionRequests(bm.config.BrickHostName)
+ if err != nil {
+ log.Fatalf("unable to get outstanding session action requests due to: %s", err.Error())
+ }
+
+ // We wait for these to finish before starting keepalive
+ for _, action := range actions {
+ // TODO: what about the extra response if no one is listening any more?
+ bm.sessionActionHandler.ProcessSessionAction(action)
+ }
+}
+
+func (bm *brickManager) restoreSessions() {
+ // In case the server was restarted, double check everything is up
+ // If marked deleted, and not already deleted, delete it
+ sessions, err := bm.sessionRegistry.GetAllSessions()
+ if err != nil {
+ log.Panicf("unable to fetch all sessions due to: %s", err)
+ }
+ for _, session := range sessions {
+ hasLocalBrick := false
+ for _, brick := range session.AllocatedBricks {
+ if brick.BrickHostName == bm.config.BrickHostName {
+ hasLocalBrick = true
+ }
+ }
+ if !hasLocalBrick {
+ continue
+ }
+
+ if session.Status.FileSystemCreated && !session.Status.DeleteRequested {
+ // If we have previously finished creating the session,
+ // and we don't have a pending delete, try to restore the session
+ log.Println("Restoring session with local brick", session.Name)
+ go bm.sessionActionHandler.RestoreSession(session)
+ } else {
+ // TODO: should we just do the delete here?
+ log.Printf("WARNING session in strange state: %+v\n", session)
+ }
+ }
+}
+
+func (bm *brickManager) Shutdown() {
+ // Delete the keepalive key, to stop new actions being sent
+ // Wait for existing actions by trying to get a lock on all
+	// sessions for which we are the primary brick
+ // TODO...
+}
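+
+// A possible shape for Shutdown (a sketch of the TODO above, nothing here is
+// implemented yet): delete the keepalive key via the brick registry so no new
+// actions are routed here, then acquire the session mutex for every session
+// whose PrimaryBrickHost is this host, which blocks until in-flight actions
+// have drained.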
diff --git a/internal/pkg/dacd/brick_manager_impl/brick_manager_test.go b/internal/pkg/dacd/brick_manager_impl/brick_manager_test.go
new file mode 100644
index 00000000..a6e10cff
--- /dev/null
+++ b/internal/pkg/dacd/brick_manager_impl/brick_manager_test.go
@@ -0,0 +1,44 @@
+package brick_manager_impl
+
+import (
+ "context"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/config"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/mock_facade"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/mock_registry"
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ "os"
+ "testing"
+)
+
+func TestBrickManager_Hostname(t *testing.T) {
+ brickManager := brickManager{config: config.BrickManagerConfig{BrickHostName: "host"}}
+ assert.Equal(t, "host", brickManager.Hostname())
+}
+
+func TestBrickManager_Startup(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ brickRegistry := mock_registry.NewMockBrickHostRegistry(mockCtrl)
+ sessionActions := mock_registry.NewMockSessionActions(mockCtrl)
+ sessionRegistry := mock_registry.NewMockSessionRegistry(mockCtrl)
+ handler := mock_facade.NewMockSessionActionHandler(mockCtrl)
+ brickManager := brickManager{
+ config: config.GetBrickManagerConfig(config.DefaultEnv),
+ brickRegistry: brickRegistry,
+ sessionActions: sessionActions,
+ sessionActionHandler: handler,
+ sessionRegistry: sessionRegistry,
+ }
+
+ // TODO...
+ brickRegistry.EXPECT().UpdateBrickHost(gomock.Any())
+ sessionActions.EXPECT().GetSessionActionRequests(context.TODO(), gomock.Any())
+ sessionActions.EXPECT().GetOutstandingSessionActionRequests(brickManager.config.BrickHostName)
+ sessionRegistry.EXPECT().GetAllSessions()
+ hostname, _ := os.Hostname()
+ brickRegistry.EXPECT().KeepAliveHost(context.TODO(), datamodel.BrickHostName(hostname))
+
+ brickManager.Startup()
+}
diff --git a/internal/pkg/dacd/brick_manager_impl/host_bricks.go b/internal/pkg/dacd/brick_manager_impl/host_bricks.go
new file mode 100644
index 00000000..9d149b7e
--- /dev/null
+++ b/internal/pkg/dacd/brick_manager_impl/host_bricks.go
@@ -0,0 +1,35 @@
+package brick_manager_impl
+
+import (
+ "fmt"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/config"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+)
+
+func getDevices(brickManagerConfig config.BrickManagerConfig) []string {
+ // TODO: should check these devices exist
+ var bricks []string
+ for i := 0; i < int(brickManagerConfig.DeviceCount); i++ {
+ device := fmt.Sprintf(brickManagerConfig.DeviceAddressPattern, i)
+ bricks = append(bricks, device)
+ }
+ return bricks
+}
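+
+// For example (pattern assumed for illustration): with DeviceAddressPattern
+// "nvme%dn1" and DeviceCount 3, getDevices returns
+// ["nvme0n1", "nvme1n1", "nvme2n1"].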
+
+func getBrickHost(brickManagerConfig config.BrickManagerConfig) datamodel.BrickHost {
+ var bricks []datamodel.Brick
+ for _, device := range getDevices(brickManagerConfig) {
+ bricks = append(bricks, datamodel.Brick{
+ Device: device,
+ BrickHostName: brickManagerConfig.BrickHostName,
+ PoolName: brickManagerConfig.PoolName,
+ CapacityGiB: brickManagerConfig.DeviceCapacityGiB,
+ })
+ }
+
+ return datamodel.BrickHost{
+ Name: brickManagerConfig.BrickHostName,
+ Bricks: bricks,
+ Enabled: brickManagerConfig.HostEnabled,
+ }
+}
diff --git a/internal/pkg/dacd/brick_manager_impl/session_action_handler.go b/internal/pkg/dacd/brick_manager_impl/session_action_handler.go
new file mode 100644
index 00000000..85d9880f
--- /dev/null
+++ b/internal/pkg/dacd/brick_manager_impl/session_action_handler.go
@@ -0,0 +1,422 @@
+package brick_manager_impl
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/facade"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/filesystem"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/filesystem_impl"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/registry_impl"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/store"
+ "log"
+)
+
+func NewSessionActionHandler(keystore store.Keystore) facade.SessionActionHandler {
+ return &sessionActionHandler{
+ registry_impl.NewSessionRegistry(keystore),
+ registry_impl.NewSessionActionsRegistry(keystore),
+ // TODO: fix up fsprovider!!
+ filesystem_impl.NewFileSystemProvider(nil),
+ false,
+ }
+}
+
+type sessionActionHandler struct {
+ sessionRegistry registry.SessionRegistry
+ actions registry.SessionActions
+ fsProvider filesystem.Provider
+ skipActions bool
+}
+
+func (s *sessionActionHandler) ProcessSessionAction(action datamodel.SessionAction) {
+ switch action.ActionType {
+ case datamodel.SessionDelete:
+ s.handleDelete(action)
+ case datamodel.SessionCreateFilesystem:
+ s.handleCreate(action)
+ case datamodel.SessionCopyDataIn:
+ s.handleCopyIn(action)
+ case datamodel.SessionMount:
+ s.handleMount(action)
+ case datamodel.SessionUnmount:
+ s.handleUnmount(action)
+ case datamodel.SessionCopyDataOut:
+ s.handleCopyOut(action)
+ default:
+ log.Panicf("not yet implemented action for %+v", action)
+ }
+}
+
+func (s *sessionActionHandler) processWithMutex(action datamodel.SessionAction, process func() (datamodel.Session, error)) {
+
+ sessionName := action.Session.Name
+ sessionMutex, err := s.sessionRegistry.GetSessionMutex(sessionName)
+ if err != nil {
+ log.Printf("unable to get session mutex: %s due to: %s\n", sessionName, err)
+ action.Error = err.Error()
+ return
+ }
+ err = sessionMutex.Lock(context.TODO())
+ if err != nil {
+ log.Printf("unable to lock session mutex: %s due to: %s\n", sessionName, err)
+ action.Error = err.Error()
+ return
+ }
+
+ // Always complete action and drop mutex on function exit
+ defer func() {
+ if err := s.actions.CompleteSessionAction(action); err != nil {
+ log.Printf("failed to complete action %+v due to: %s\n", action, err.Error())
+ }
+ if err := sessionMutex.Unlock(context.TODO()); err != nil {
+ log.Printf("failed to drop mutex for: %s due to: %s\n", sessionName, err.Error())
+ }
+ }()
+
+ log.Printf("starting action %+v\n", action)
+
+ session, err := process()
+ if err != nil {
+ action.Error = err.Error()
+ log.Printf("error during action %+v\n", action)
+ } else {
+ action.Session = session
+ log.Printf("finished action %+v\n", action)
+ }
+}
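+
+// Note the deferred ordering above: CompleteSessionAction is sent before the
+// session mutex is released, so the dacctl side only observes the action
+// result once the session record has stopped changing. Failures travel back
+// through the action's Error field rather than a Go error return value.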
+
+func (s *sessionActionHandler) handleCreate(action datamodel.SessionAction) {
+ s.processWithMutex(action, func() (datamodel.Session, error) {
+ session := action.Session
+ // Nothing to create, just complete the action
+ // TODO: why do we send the action?
+ if session.ActualSizeBytes == 0 {
+ return session, nil
+ }
+
+ // Get latest session now we have the mutex
+ session, err := s.sessionRegistry.GetSession(session.Name)
+ if err != nil {
+ return session, fmt.Errorf("error getting session: %s", err)
+ }
+ if session.Status.DeleteRequested {
+ return session, fmt.Errorf("can't do action once delete has been requested for")
+ }
+
+ fsStatus, err := s.fsProvider.Create(session)
+ session.FilesystemStatus = fsStatus
+ session.Status.FileSystemCreated = err == nil
+ if err != nil {
+ session.Status.Error = err.Error()
+ }
+
+ session, updateErr := s.sessionRegistry.UpdateSession(session)
+ if updateErr != nil {
+ log.Println("Failed to update session:", updateErr)
+ if err == nil {
+ err = updateErr
+ }
+ }
+ return session, err
+ })
+}
+
+func (s *sessionActionHandler) handleDelete(action datamodel.SessionAction) {
+ s.processWithMutex(action, func() (datamodel.Session, error) {
+ session, err := s.sessionRegistry.GetSession(action.Session.Name)
+ if err != nil {
+			// TODO: deal with already being deleted? add a check-if-exists call?
+ return action.Session, fmt.Errorf("error getting session: %s", err)
+ }
+
+ if !session.Status.UnmountComplete {
+ if err := s.doAllUnmounts(session); err != nil {
+ log.Println("failed unmount during delete", session.Name)
+ }
+ }
+ if !session.Status.CopyDataOutComplete && !session.Status.DeleteSkipCopyDataOut {
+ if err := s.fsProvider.DataCopyOut(action.Session); err != nil {
+ log.Println("failed DataCopyOut during delete", action.Session.Name)
+ }
+ }
+
+ // Only try delete if we have bricks to delete
+ if session.ActualSizeBytes > 0 {
+ if err := s.fsProvider.Delete(session); err != nil {
+ return session, err
+ }
+ }
+
+ return session, s.sessionRegistry.DeleteSession(session)
+ })
+}
+
+func (s *sessionActionHandler) handleCopyIn(action datamodel.SessionAction) {
+ s.processWithMutex(action, func() (datamodel.Session, error) {
+ // Get latest session now we have the mutex
+ session, err := s.sessionRegistry.GetSession(action.Session.Name)
+ if err != nil {
+ return action.Session, fmt.Errorf("error getting session: %s", err)
+ }
+ if session.Status.DeleteRequested {
+ return session, fmt.Errorf("can't do action once delete has been requested for")
+ }
+
+ if err := s.fsProvider.DataCopyIn(session); err != nil {
+ return session, err
+ }
+
+ session.Status.CopyDataInComplete = true
+ return s.sessionRegistry.UpdateSession(session)
+ })
+}
+
+func (s *sessionActionHandler) handleCopyOut(action datamodel.SessionAction) {
+ s.processWithMutex(action, func() (datamodel.Session, error) {
+ // Get latest session now we have the mutex
+ session, err := s.sessionRegistry.GetSession(action.Session.Name)
+ if err != nil {
+ return action.Session, fmt.Errorf("error getting session: %s", err)
+ }
+ if session.Status.DeleteRequested {
+ return session, fmt.Errorf("can't do action once delete has been requested for")
+ }
+
+ if err := s.fsProvider.DataCopyOut(session); err != nil {
+ return session, err
+ }
+
+ session.Status.CopyDataOutComplete = true
+ return s.sessionRegistry.UpdateSession(session)
+ })
+}
+
+func (s *sessionActionHandler) doAllMounts(actionSession datamodel.Session) (datamodel.Session, error) {
+ attachmentSession := datamodel.AttachmentSession{
+ Hosts: actionSession.RequestedAttachHosts,
+ SessionName: actionSession.Name,
+ }
+ if actionSession.ActualSizeBytes > 0 {
+ jobAttachmentStatus := datamodel.AttachmentSessionStatus{
+ AttachmentSession: attachmentSession,
+ GlobalMount: actionSession.VolumeRequest.Access == datamodel.Striped || actionSession.VolumeRequest.Access == datamodel.PrivateAndStriped,
+ PrivateMount: actionSession.VolumeRequest.Access == datamodel.Private || actionSession.VolumeRequest.Access == datamodel.PrivateAndStriped,
+ SwapBytes: actionSession.VolumeRequest.SwapBytes,
+ }
+ if actionSession.CurrentAttachments == nil {
+ actionSession.CurrentAttachments = map[datamodel.SessionName]datamodel.AttachmentSessionStatus{
+ actionSession.Name: jobAttachmentStatus,
+ }
+ } else {
+ actionSession.CurrentAttachments[actionSession.Name] = jobAttachmentStatus
+ }
+ session, err := s.sessionRegistry.UpdateSession(actionSession)
+ if err != nil {
+ return actionSession, err
+ }
+ actionSession = session
+
+ if err := s.fsProvider.Mount(actionSession, jobAttachmentStatus); err != nil {
+ return actionSession, err
+ }
+ // TODO: should we update the session? and delete attachments later?
+ }
+ for _, sessionName := range actionSession.MultiJobAttachments {
+ if err := s.doMultiJobMount(actionSession, sessionName); err != nil {
+			return actionSession, err
+ }
+ }
+ return actionSession, nil
+}
+
+func (s *sessionActionHandler) doMultiJobMount(actionSession datamodel.Session, sessionName datamodel.SessionName) error {
+ sessionMutex, err := s.sessionRegistry.GetSessionMutex(sessionName)
+ if err != nil {
+ log.Printf("unable to get session mutex: %s due to: %s\n", sessionName, err)
+ return err
+ }
+ if err = sessionMutex.Lock(context.TODO()); err != nil {
+ log.Printf("unable to lock session mutex: %s due to: %s\n", sessionName, err)
+ return err
+ }
+ defer func() {
+ if err := sessionMutex.Unlock(context.TODO()); err != nil {
+ log.Println("failed to drop mutex for:", sessionName)
+ }
+ }()
+
+ multiJobSession, err := s.sessionRegistry.GetSession(sessionName)
+ if err != nil {
+ return err
+ }
+ if !multiJobSession.VolumeRequest.MultiJob {
+ log.Panicf("trying multi-job attach to non-multi job session %s", multiJobSession.Name)
+ }
+
+ attachmentSession := datamodel.AttachmentSession{
+ Hosts: actionSession.RequestedAttachHosts,
+ SessionName: actionSession.Name,
+ }
+ multiJobAttachmentStatus := datamodel.AttachmentSessionStatus{
+ AttachmentSession: attachmentSession,
+ GlobalMount: true,
+ }
+ if multiJobSession.CurrentAttachments == nil {
+ multiJobSession.CurrentAttachments = map[datamodel.SessionName]datamodel.AttachmentSessionStatus{
+ attachmentSession.SessionName: multiJobAttachmentStatus,
+ }
+ } else {
+ if _, ok := multiJobSession.CurrentAttachments[attachmentSession.SessionName]; ok {
+ return fmt.Errorf("already attached for session %s and multi-job %s",
+ attachmentSession.SessionName, sessionName)
+ }
+ multiJobSession.CurrentAttachments[attachmentSession.SessionName] = multiJobAttachmentStatus
+ }
+
+ multiJobSession, err = s.sessionRegistry.UpdateSession(multiJobSession)
+ if err != nil {
+ return err
+ }
+ return s.fsProvider.Mount(multiJobSession, multiJobAttachmentStatus)
+}
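+
+// doMultiJobMount takes the multi-job session's own mutex (the caller already
+// holds the per-job session's mutex), records the attachment under the
+// attaching session's name, and only then mounts. Updating the registry
+// before mounting means a crash leaves a stale attachment record rather than
+// an untracked mount.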
+
+func (s *sessionActionHandler) doMultiJobUnmount(actionSession datamodel.Session, sessionName datamodel.SessionName) error {
+ sessionMutex, err := s.sessionRegistry.GetSessionMutex(sessionName)
+ if err != nil {
+ log.Printf("unable to get session mutex: %s due to: %s\n", sessionName, err)
+ return err
+ }
+ if err = sessionMutex.Lock(context.TODO()); err != nil {
+ log.Printf("unable to lock session mutex: %s due to: %s\n", sessionName, err)
+ return err
+ }
+ defer func() {
+ if err := sessionMutex.Unlock(context.TODO()); err != nil {
+ log.Println("failed to drop mutex for:", sessionName)
+ }
+ }()
+
+ multiJobSession, err := s.sessionRegistry.GetSession(sessionName)
+ if err != nil {
+ return err
+ }
+ if !multiJobSession.VolumeRequest.MultiJob {
+ log.Panicf("trying multi-job detach from non-multi job session %s", multiJobSession.Name)
+ }
+
+ attachments, ok := multiJobSession.CurrentAttachments[actionSession.Name]
+ if !ok {
+ log.Println("skip detach, already seems to be detached")
+ return nil
+ }
+ if err := s.fsProvider.Unmount(multiJobSession, attachments); err != nil {
+ return err
+ }
+
+ // update multi job session to note our attachments have now gone
+ delete(multiJobSession.CurrentAttachments, actionSession.Name)
+ _, err = s.sessionRegistry.UpdateSession(multiJobSession)
+ return err
+}
+
+func (s *sessionActionHandler) doAllUnmounts(actionSession datamodel.Session) error {
+ if actionSession.ActualSizeBytes > 0 {
+ if err := s.fsProvider.Unmount(actionSession, actionSession.CurrentAttachments[actionSession.Name]); err != nil {
+ return err
+ }
+ }
+ for _, sessionName := range actionSession.MultiJobAttachments {
+ if err := s.doMultiJobUnmount(actionSession, sessionName); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *sessionActionHandler) handleMount(action datamodel.SessionAction) {
+ s.processWithMutex(action, func() (datamodel.Session, error) {
+ session, err := s.sessionRegistry.GetSession(action.Session.Name)
+ if err != nil {
+ return action.Session, fmt.Errorf("error getting session: %s", err)
+ }
+ if session.Status.DeleteRequested {
+ return session, fmt.Errorf("can't do action once delete has been requested")
+ }
+ if session.Status.MountComplete {
+ return session, errors.New("already mounted, can't mount again")
+ }
+
+ session, err = s.doAllMounts(session)
+ if err != nil {
+ if err := s.doAllUnmounts(session); err != nil {
+ log.Println("error while rolling back possible partial mount", action.Session.Name, err)
+ }
+ return action.Session, err
+ }
+
+ session.Status.MountComplete = true
+ return s.sessionRegistry.UpdateSession(session)
+ })
+}
+
+func (s *sessionActionHandler) handleUnmount(action datamodel.SessionAction) {
+ s.processWithMutex(action, func() (datamodel.Session, error) {
+ session, err := s.sessionRegistry.GetSession(action.Session.Name)
+ if err != nil {
+ return action.Session, fmt.Errorf("error getting session: %s", err)
+ }
+ if session.Status.DeleteRequested {
+ return session, fmt.Errorf("can't do action once delete has been requested")
+ }
+ if session.Status.UnmountComplete {
+ return session, errors.New("already unmounted, can't unmount again")
+ }
+
+ if err := s.doAllUnmounts(session); err != nil {
+ return action.Session, err
+ }
+
+ session.Status.UnmountComplete = true
+ return s.sessionRegistry.UpdateSession(session)
+ })
+}
+
+func (s *sessionActionHandler) RestoreSession(session datamodel.Session) {
+ if session.ActualSizeBytes == 0 {
+ // Nothing to do
+ return
+ }
+
+ // Get session lock before attempting the restore
+ sessionMutex, err := s.sessionRegistry.GetSessionMutex(session.Name)
+ if err != nil {
+ log.Printf("unable to get session mutex: %s due to: %s\n", session.Name, err)
+ return
+ }
+ err = sessionMutex.Lock(context.TODO())
+ if err != nil {
+ log.Printf("unable to lock session mutex: %s due to: %s\n", session.Name, err)
+ return
+ }
+ // Always drop mutex on function exit
+ defer func() {
+ if err := sessionMutex.Unlock(context.TODO()); err != nil {
+ log.Printf("failed to drop mutex for: %s due to: %s\n", session.Name, err.Error())
+ }
+ }()
+
+ err = s.fsProvider.Restore(session)
+
+ if err != nil {
+ log.Printf("unable to restore session: %+v\n", session)
+ session.Status.Error = err.Error()
+ if _, err := s.sessionRegistry.UpdateSession(session); err != nil {
+ log.Panicf("unable to report that session restore failed for session: %s", session.Name)
+ }
+ }
+
+ // TODO: do we just assume any pending mounts will resume in their own time? or should we retry mounts too?
+}
diff --git a/internal/pkg/dacd/brick_manager_impl/session_action_handler_test.go b/internal/pkg/dacd/brick_manager_impl/session_action_handler_test.go
new file mode 100644
index 00000000..25f7f1a6
--- /dev/null
+++ b/internal/pkg/dacd/brick_manager_impl/session_action_handler_test.go
@@ -0,0 +1,56 @@
+package brick_manager_impl
+
+import (
+ "context"
+ "fmt"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/mock_filesystem"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/mock_registry"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/mock_store"
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestSessionActionHandler_ProcessSessionAction_Unknown(t *testing.T) {
+ action := datamodel.SessionAction{}
+ handler := NewSessionActionHandler(nil)
+
+ assert.PanicsWithValue(t,
+ fmt.Sprintf("not yet implemented action for %+v", action),
+ func() { handler.ProcessSessionAction(action) })
+}
+
+func TestSessionActionHandler_handleCreate(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ registry := mock_registry.NewMockSessionRegistry(mockCtrl)
+ actions := mock_registry.NewMockSessionActions(mockCtrl)
+ fsProvider := mock_filesystem.NewMockProvider(mockCtrl)
+ handler := sessionActionHandler{
+ sessionRegistry: registry, actions: actions, fsProvider: fsProvider,
+ }
+ action := datamodel.SessionAction{
+ ActionType: datamodel.SessionCreateFilesystem,
+ Session: datamodel.Session{Name: "test", ActualSizeBytes: 42},
+ }
+ sessionMutex := mock_store.NewMockMutex(mockCtrl)
+ registry.EXPECT().GetSessionMutex(action.Session.Name).Return(sessionMutex, nil)
+ sessionMutex.EXPECT().Lock(context.TODO())
+ sessionMutex.EXPECT().Unlock(context.TODO())
+ registry.EXPECT().GetSession(action.Session.Name).Return(action.Session, nil)
+ fsProvider.EXPECT().Create(action.Session)
+ updatedSession := datamodel.Session{
+ Name: action.Session.Name,
+ Status: datamodel.SessionStatus{FileSystemCreated: true},
+ ActualSizeBytes: 42,
+ }
+ registry.EXPECT().UpdateSession(updatedSession).Return(updatedSession, nil)
+ updatedAction := datamodel.SessionAction{
+ ActionType: datamodel.SessionCreateFilesystem,
+ Session: updatedSession,
+ }
+ actions.EXPECT().CompleteSessionAction(updatedAction)
+
+ handler.handleCreate(action)
+}
diff --git a/internal/pkg/dacd/interface.go b/internal/pkg/dacd/interface.go
new file mode 100644
index 00000000..f252a446
--- /dev/null
+++ b/internal/pkg/dacd/interface.go
@@ -0,0 +1,14 @@
+package dacd
+
+type BrickManager interface {
+ // Get the current hostname key that is being kept alive
+ Hostname() string
+
+ // Tidy up from previous shutdowns,
+ // then start waiting for session actions,
+ // notifying dacctl we are listening via the keep-alive key
+ Startup()
+ // Wait for any events to complete
+ // then do any tidy up required for a graceful shutdown
+ Shutdown()
+}
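+
+// Illustrative usage from a dacd main loop (a sketch only; the
+// constructor and signal helper names below are assumed):
+//
+//   manager := brick_manager_impl.NewBrickManager(...)
+//   manager.Startup()
+//   log.Println("listening as:", manager.Hostname())
+//   waitForShutdownSignal() // e.g. block on SIGINT/SIGTERM
+//   manager.Shutdown()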
diff --git a/internal/pkg/datamodel/brick_allocation.go b/internal/pkg/datamodel/brick_allocation.go
new file mode 100644
index 00000000..c5f10277
--- /dev/null
+++ b/internal/pkg/datamodel/brick_allocation.go
@@ -0,0 +1,14 @@
+package datamodel
+
+// There can be at most one allocation record for each Brick
+type BrickAllocation struct {
+ // Brick that is allocated
+ Brick Brick
+
+ // Name of the session that owns the brick
+ Session SessionName
+
+ // The allocation with index 0 is the primary brick,
+ // which is responsible for provisioning the associated volume
+ AllocatedIndex uint
+}
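+
+// For example (illustrative only), a two-brick session could hold:
+//   {Brick: dac1/nvme1n1, Session: "job1", AllocatedIndex: 0} // primary
+//   {Brick: dac2/nvme1n1, Session: "job1", AllocatedIndex: 1}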
diff --git a/internal/pkg/datamodel/brick_host.go b/internal/pkg/datamodel/brick_host.go
new file mode 100644
index 00000000..24b81327
--- /dev/null
+++ b/internal/pkg/datamodel/brick_host.go
@@ -0,0 +1,38 @@
+package datamodel
+
+type BrickHostName string
+
+type BrickHost struct {
+ Name BrickHostName
+
+ // All bricks present on this host
+ Bricks []Brick
+
+ // True if allowing new volumes to use bricks from this host
+ Enabled bool
+}
+
+type BrickHostStatus struct {
+ BrickHost BrickHost
+
+ // True if the current keepalive key exists
+ Alive bool
+}
+
+type Brick struct {
+ // Bricks are identified by device and hostname
+ // It must only contain the characters A-Za-z0-9
+ // e.g. sdb, not /dev/sdb
+ Device string
+
+ // It must only contain the characters "A-Za-z0-9."
+ BrickHostName BrickHostName
+
+ // The pool a brick is associated with
+ // It must only contain the characters A-Za-z0-9
+ PoolName PoolName
+
+ // TODO: move this to bytes, and make bytes a special type?
+ // Size of the brick, defines the pool granularity
+ CapacityGiB uint
+}
diff --git a/internal/pkg/datamodel/pool.go b/internal/pkg/datamodel/pool.go
new file mode 100644
index 00000000..2d226cb3
--- /dev/null
+++ b/internal/pkg/datamodel/pool.go
@@ -0,0 +1,22 @@
+package datamodel
+
+type PoolName string
+
+type Pool struct {
+ // The pool is derived from all the reported bricks
+ Name PoolName
+
+ // This is the allocation unit for the pool
+ // It is the minimum size of any registered brick
+ GranularityBytes uint
+}
+
+type PoolInfo struct {
+ Pool Pool
+
+ // Bricks from alive hosts
+ AvailableBricks []Brick
+
+ // All bricks that are currently allocated
+ AllocatedBricks []BrickAllocation
+}
diff --git a/internal/pkg/datamodel/session.go b/internal/pkg/datamodel/session.go
new file mode 100644
index 00000000..fadc6486
--- /dev/null
+++ b/internal/pkg/datamodel/session.go
@@ -0,0 +1,165 @@
+package datamodel
+
+type SessionName string
+
+// This object is updated by dacctl.
+// Actions are sent relative to a Session,
+// and the primary brick waits for those session actions
+type Session struct {
+ // Job name or persistent buffer name
+ Name SessionName
+
+ // Currently stored revision
+ // this is checked when an update is requested
+ Revision int64
+
+ // unix uid
+ Owner uint
+
+ // unix group id
+ Group uint
+
+ // UTC unix timestamp when the buffer was created
+ CreatedAt uint
+
+ // Details of what was requested
+ VolumeRequest VolumeRequest
+
+ // Flags about current state of the buffer
+ Status SessionStatus
+
+ // Request certain files to be staged in
+ // Not currently allowed for multi job volumes
+ StageInRequests []DataCopyRequest
+
+ // Request certain files to be staged out
+ // Not currently allowed for multi job volumes
+ StageOutRequests []DataCopyRequest
+
+ // There may be attachments to multiple shared volumes
+ MultiJobAttachments []SessionName
+
+ // Environment variables for each volume associated with the job
+ Paths map[string]string
+
+ // Resources used by session once pool granularity is taken into account
+ ActualSizeBytes int
+
+ // List of the bricks allocated to implement the JobVolume
+ // One is the primary brick that should be watching for all actions
+ AllocatedBricks []Brick
+
+ // Where session requests should be sent
+ PrimaryBrickHost BrickHostName
+
+ // Compute hosts for this session
+ // Note: should be empty for multi-job volumes
+ RequestedAttachHosts []string
+
+ // Used by filesystem provider to store internal state
+ // and track if the filesystem had a recent error
+ FilesystemStatus FilesystemStatus
+
+ // For multi-job volumes these are always other sessions
+ // for job volumes this is always for just this session
+ CurrentAttachments map[SessionName]AttachmentSessionStatus
+}
+
+type FilesystemStatus struct {
+ Error string
+ InternalName string
+ InternalData string
+}
+
+type AttachmentSession struct {
+ SessionName SessionName
+ Hosts []string
+}
+
+type AttachmentSessionStatus struct {
+ AttachmentSession AttachmentSession
+
+ GlobalMount bool
+ PrivateMount bool
+ SwapBytes int
+
+ DetachRequested bool // TODO: delete this bit?
+ Error string
+}
+
+type SessionStatus struct {
+ // If not empty, the session has an unresolved error
+ // and can't be mounted by any new sessions
+ // but it can be deleted
+ Error string
+
+ // CreateVolume has succeeded, so other actions can now happen
+ FileSystemCreated bool
+
+ // Assuming one data in / data out cycle per job
+ CopyDataInComplete bool
+ CopyDataOutComplete bool
+
+ // Records if we have started trying to delete
+ DeleteRequested bool
+
+ // Records if we should skip copy data out on delete
+ DeleteSkipCopyDataOut bool
+
+ // Mount status
+ UnmountComplete bool
+ MountComplete bool
+}
+
+type VolumeRequest struct {
+ MultiJob bool
+ Caller string
+ TotalCapacityBytes int
+ PoolName PoolName
+ Access AccessMode
+ Type BufferType
+ SwapBytes int
+}
+
+type AccessMode int
+
+const (
+ NoAccess AccessMode = iota
+ Striped
+ Private
+ PrivateAndStriped
+)
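+
+// For example, dacd's session action handler maps these modes
+// to mounts roughly as follows (illustrative summary):
+//   Striped           -> shared (global) mount only
+//   Private           -> per-host private mount only
+//   PrivateAndStriped -> both global and private mounts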
+
+type BufferType int
+
+const (
+ Scratch BufferType = iota
+ Cache
+)
+
+type DataCopyRequest struct {
+ // Source points to a File or a Directory,
+ // or a file that contains a list of source and destinations,
+ // with each pair on a new line
+ SourceType SourceType
+ // The path is either to a file or a directory or
+ // a list with source and destination file space separated pairs, each on a new line
+ Source string
+ // Must be empty string for type list, otherwise specified location
+ Destination string
+ // Used to notify if copy in has been requested
+ RequestCopyIn bool
+ // Report if the copy has completed
+ CopyCompleted bool
+ // if there was problem, record it
+ Error string
+}
+
+type SourceType string
+
+const (
+ File SourceType = "file"
+ Directory SourceType = "directory"
+ // Provided a file that has source and destination file space separated pairs, each on a new line
+ List SourceType = "list"
+)
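+
+// For example, a "list" source file might contain pairs like
+// the following (illustrative paths only):
+//   $DW_JOB_STRIPED/results1 /home/user/results1
+//   $DW_JOB_STRIPED/results2 /home/user/results2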
diff --git a/internal/pkg/datamodel/session_action.go b/internal/pkg/datamodel/session_action.go
new file mode 100644
index 00000000..286fc17c
--- /dev/null
+++ b/internal/pkg/datamodel/session_action.go
@@ -0,0 +1,21 @@
+package datamodel
+
+type SessionAction struct {
+ Uuid string
+ Session Session
+ ActionType SessionActionType
+ Error string
+}
+
+type SessionActionType string
+
+// TODO: probably should be an int with custom parser?
+const (
+ UnknownSessionAction SessionActionType = SessionActionType("")
+ SessionCreateFilesystem = SessionActionType("CreateFilesystem")
+ SessionDelete = SessionActionType("Delete")
+ SessionCopyDataIn = SessionActionType("CopyDataIn")
+ SessionMount = SessionActionType("Mount")
+ SessionUnmount = SessionActionType("Unmount")
+ SessionCopyDataOut = SessionActionType("CopyDataOut")
+)
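+
+// For a job buffer these actions are typically sent in this order
+// (a sketch, assuming one data in / data out cycle per job):
+//   CreateFilesystem -> CopyDataIn -> Mount
+//   -> Unmount -> CopyDataOut -> Delete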
diff --git a/internal/pkg/etcdregistry/keystore.go b/internal/pkg/etcdregistry/keystore.go
deleted file mode 100644
index b4ecaed1..00000000
--- a/internal/pkg/etcdregistry/keystore.go
+++ /dev/null
@@ -1,268 +0,0 @@
-package etcdregistry
-
-import (
- "context"
- "crypto/tls"
- "errors"
- "fmt"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/keystoreregistry"
- "github.com/coreos/etcd/clientv3"
- "github.com/coreos/etcd/clientv3/clientv3util"
- "github.com/coreos/etcd/clientv3/concurrency"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- "github.com/coreos/etcd/mvcc/mvccpb"
- "github.com/coreos/etcd/pkg/transport"
- "log"
- "os"
- "strings"
- "time"
-)
-
-func getTLSConfig() *tls.Config {
- certFile := os.Getenv("ETCDCTL_CERT_FILE")
- keyFile := os.Getenv("ETCDCTL_KEY_FILE")
- caFile := os.Getenv("ETCDCTL_CA_FILE")
-
- if certFile == "" || keyFile == "" || caFile == "" {
- return nil
- }
-
- tlsInfo := transport.TLSInfo{
- CertFile: certFile,
- KeyFile: keyFile,
- TrustedCAFile: caFile,
- }
- tlsConfig, err := tlsInfo.ClientConfig()
- if err != nil {
- log.Fatal(err)
- }
- return tlsConfig
-}
-
-func getEndpoints() []string {
- endpoints := os.Getenv("ETCDCTL_ENDPOINTS")
- if endpoints == "" {
- endpoints = os.Getenv("ETCD_ENDPOINTS")
- }
- if endpoints == "" {
- log.Fatalf("Must set ETCDCTL_ENDPOINTS environemnt variable, e.g. export ETCDCTL_ENDPOINTS=127.0.0.1:2379")
- }
- return strings.Split(endpoints, ",")
-}
-
-func newEtcdClient() *clientv3.Client {
- cli, err := clientv3.New(clientv3.Config{
- Endpoints: getEndpoints(),
- DialTimeout: 10 * time.Second,
- TLS: getTLSConfig(),
- })
- if err != nil {
- fmt.Println("failed to create client")
- log.Fatal(err)
- }
- return cli
-}
-
-func NewKeystore() keystoreregistry.Keystore {
- cli := newEtcdClient()
- return &etcKeystore{
- Watcher: cli.Watcher,
- KV: cli.KV,
- Lease: cli.Lease,
- Client: cli,
- }
-}
-
-type etcKeystore struct {
- Watcher clientv3.Watcher
- KV clientv3.KV
- Lease clientv3.Lease
- Client *clientv3.Client
-}
-
-func (client *etcKeystore) NewMutex(lockKey string) (keystoreregistry.Mutex, error) {
- session, err := concurrency.NewSession(client.Client)
- if err != nil {
- return nil, err
- }
- key := fmt.Sprintf("/locks/%s", lockKey)
- return concurrency.NewMutex(session, key), nil
-}
-
-func handleError(err error) {
- if err != nil {
- switch err {
- case context.Canceled:
- log.Printf("ctx is canceled by another routine: %v", err)
- case context.DeadlineExceeded:
- log.Printf("ctx is attached with a deadline is exceeded: %v", err)
- case rpctypes.ErrEmptyKey:
- log.Printf("client-side error: %v", err)
- default:
- log.Printf("bad cluster endpoints, which are not etcd servers: %v", err)
- }
- log.Fatal(err)
- }
-}
-
-func (client *etcKeystore) Close() error {
- return client.Client.Close()
-}
-
-func (client *etcKeystore) runTransaction(ifOps []clientv3.Cmp, thenOps []clientv3.Op) error {
- kvc := clientv3.NewKV(client.Client)
- kvc.Txn(context.Background())
- response, err := kvc.Txn(context.Background()).If(ifOps...).Then(thenOps...).Commit()
- handleError(err)
-
- if !response.Succeeded {
- log.Println(ifOps)
- return fmt.Errorf("transaction failed, as condition not met")
- }
- return nil
-}
-
-func (client *etcKeystore) Add(keyValues []keystoreregistry.KeyValue) error {
- var ifOps []clientv3.Cmp
- var thenOps []clientv3.Op
- for _, keyValue := range keyValues {
- ifOps = append(ifOps, clientv3util.KeyMissing(keyValue.Key))
- thenOps = append(thenOps, clientv3.OpPut(keyValue.Key, keyValue.Value))
- }
- return client.runTransaction(ifOps, thenOps)
-}
-
-func (client *etcKeystore) Update(keyValues []keystoreregistry.KeyValueVersion) error {
- var ifOps []clientv3.Cmp
- var thenOps []clientv3.Op
- for _, keyValue := range keyValues {
- if keyValue.ModRevision > 0 {
- ifOps = append(ifOps, clientv3util.KeyExists(keyValue.Key)) // only add new keys if ModRevision == 0
- checkModRev := clientv3.Compare(clientv3.ModRevision(keyValue.Key), "=", keyValue.ModRevision)
- ifOps = append(ifOps, checkModRev)
- }
- thenOps = append(thenOps, clientv3.OpPut(keyValue.Key, keyValue.Value))
- }
- return client.runTransaction(ifOps, thenOps)
-}
-
-func (client *etcKeystore) DeleteAll(keyValues []keystoreregistry.KeyValueVersion) error {
- var ifOps []clientv3.Cmp
- var thenOps []clientv3.Op
- for _, keyValue := range keyValues {
- ifOps = append(ifOps, clientv3util.KeyExists(keyValue.Key))
- if keyValue.ModRevision > 0 {
- checkModRev := clientv3.Compare(clientv3.ModRevision(keyValue.Key), "=", keyValue.ModRevision)
- ifOps = append(ifOps, checkModRev)
- }
- thenOps = append(thenOps, clientv3.OpDelete(keyValue.Key))
- }
- return client.runTransaction(ifOps, thenOps)
-}
-
-func getKeyValueVersion(rawKeyValue *mvccpb.KeyValue) *keystoreregistry.KeyValueVersion {
- if rawKeyValue == nil {
- return nil
- }
- return &keystoreregistry.KeyValueVersion{
- Key: string(rawKeyValue.Key),
- Value: string(rawKeyValue.Value),
- ModRevision: rawKeyValue.ModRevision,
- CreateRevision: rawKeyValue.CreateRevision,
- }
-}
-
-func (client *etcKeystore) GetAll(prefix string) ([]keystoreregistry.KeyValueVersion, error) {
- kvc := clientv3.NewKV(client.Client)
- response, err := kvc.Get(context.Background(), prefix, clientv3.WithPrefix())
- handleError(err)
-
- if response.Count == 0 {
- return []keystoreregistry.KeyValueVersion{},
- fmt.Errorf("unable to find any values for prefix: %s", prefix)
- }
- var values []keystoreregistry.KeyValueVersion
- for _, rawKeyValue := range response.Kvs {
- values = append(values, *getKeyValueVersion(rawKeyValue))
- }
- return values, nil
-}
-
-func (client *etcKeystore) Get(key string) (keystoreregistry.KeyValueVersion, error) {
- kvc := clientv3.NewKV(client.Client)
- response, err := kvc.Get(context.Background(), key)
- handleError(err)
-
- value := keystoreregistry.KeyValueVersion{}
-
- if response.Count == 0 {
- return value, fmt.Errorf("unable to find any values for key: %s", key)
- }
- if response.Count > 1 {
- panic(errors.New("should never get more than one value for get"))
- }
-
- return *getKeyValueVersion(response.Kvs[0]), nil
-}
-
-func (client *etcKeystore) KeepAliveKey(key string) error {
- kvc := clientv3.NewKV(client.Client)
-
- getResponse, err := kvc.Get(context.Background(), key)
- if getResponse.Count == 1 {
- // if another host seems to exist, back off for 10 seconds incase we just did a quick restart
- time.Sleep(time.Second * 10)
- }
-
- // TODO what about configure timeout and ttl?
- var ttl int64 = 10
- grantResponse, err := client.Client.Grant(context.Background(), ttl)
- if err != nil {
- log.Fatal(err)
- }
- leaseID := grantResponse.ID
-
- txnResponse, err := kvc.Txn(context.Background()).
- If(clientv3util.KeyMissing(key)).
- Then(clientv3.OpPut(key, "keep-alive", clientv3.WithLease(leaseID), clientv3.WithPrevKV())).
- Commit()
- handleError(err)
- if !txnResponse.Succeeded {
- return fmt.Errorf("unable to create keep-alive key: %s", key)
- }
-
- ch, err := client.Client.KeepAlive(context.Background(), leaseID)
- if err != nil {
- log.Fatal(err)
- }
-
- counter := 9
- go func() {
- for range ch {
- if counter >= 9 {
- counter = 0
- log.Println("Still refreshing key:", key)
- } else {
- counter++
- }
- }
- log.Panicf("Unable to refresh key: %s", key)
- }()
- return nil
-}
-
-// TODO... old methods may need removing....
-
-func (client *etcKeystore) CleanPrefix(prefix string) error {
- kvc := clientv3.NewKV(client.Client)
- response, err := kvc.Delete(context.Background(), prefix, clientv3.WithPrefix())
- handleError(err)
-
- if response.Deleted == 0 {
- return fmt.Errorf("no keys with prefix: %s", prefix)
- }
-
- log.Printf("Cleaned %d keys with prefix: '%s'.\n", response.Deleted, prefix)
- // TODO return deleted count
- return nil
-}
diff --git a/internal/pkg/etcdregistry/watch.go b/internal/pkg/etcdregistry/watch.go
deleted file mode 100644
index 78b4b0c9..00000000
--- a/internal/pkg/etcdregistry/watch.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package etcdregistry
-
-import (
- "context"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/keystoreregistry"
- "github.com/coreos/etcd/clientv3"
-)
-
-func (client *etcKeystore) Watch(ctxt context.Context, key string, withPrefix bool) keystoreregistry.KeyValueUpdateChan {
- options := []clientv3.OpOption{clientv3.WithPrevKV()}
- if withPrefix {
- options = append(options, clientv3.WithPrefix())
- }
- rch := client.Watcher.Watch(ctxt, key, options...)
-
- c := make(chan keystoreregistry.KeyValueUpdate)
-
- go processWatchEvents(rch, c)
-
- return c
-}
-
-func processWatchEvents(watchChan clientv3.WatchChan, c chan keystoreregistry.KeyValueUpdate) {
- for watchResponse := range watchChan {
- // if error, send empty update with an error
- err := watchResponse.Err()
- if err != nil {
- c <- keystoreregistry.KeyValueUpdate{Err: err}
- }
-
- // send all events in this watch response
- for _, ev := range watchResponse.Events {
- update := keystoreregistry.KeyValueUpdate{
- IsCreate: ev.IsCreate(),
- IsModify: ev.IsModify(),
- IsDelete: ev.Type == clientv3.EventTypeDelete,
- }
- if update.IsCreate || update.IsModify {
- update.New = getKeyValueVersion(ev.Kv)
- }
- if update.IsDelete || update.IsModify {
- update.Old = getKeyValueVersion(ev.PrevKv)
- }
-
- c <- update
- }
- }
-
- // Assuming we get here when the context is cancelled or hits its timeout
- // i.e. there are no more events, so we close the channel
- close(c)
-}
diff --git a/internal/pkg/etcdregistry/watch_test.go b/internal/pkg/etcdregistry/watch_test.go
deleted file mode 100644
index 8dbddaa0..00000000
--- a/internal/pkg/etcdregistry/watch_test.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package etcdregistry
-
-import (
- "context"
- "github.com/coreos/etcd/clientv3"
- "github.com/coreos/etcd/mvcc/mvccpb"
- "github.com/stretchr/testify/assert"
- "testing"
-)
-
-type fakeWatcher struct {
- t *testing.T
- ch clientv3.WatchChan
- opts []clientv3.OpOption
-}
-
-func (fw fakeWatcher) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
- assert.Equal(fw.t, "key", key)
- assert.EqualValues(fw.t, len(fw.opts), len(opts)) // TODO: how to assert this properly?
- return fw.ch
-}
-func (fakeWatcher) Close() error {
- panic("implement me")
-}
-
-func TestEtcKeystore_Watch_Nil(t *testing.T) {
- keystore := etcKeystore{
- Watcher: fakeWatcher{
- t: t, ch: nil,
- opts: []clientv3.OpOption{clientv3.WithPrevKV()},
- },
- }
-
- response := keystore.Watch(context.TODO(), "key", false)
-
- assert.Empty(t, response)
-}
-
-func TestEtcKeystore_Watch(t *testing.T) {
- ch := make(chan clientv3.WatchResponse)
-
- keystore := etcKeystore{
- Watcher: fakeWatcher{
- t: t, ch: ch,
- opts: []clientv3.OpOption{clientv3.WithPrefix(), clientv3.WithPrevKV()},
- },
- }
-
- go func() {
- ch <- clientv3.WatchResponse{
- Events: []*clientv3.Event{
- {Type: clientv3.EventTypePut, Kv: &mvccpb.KeyValue{Key: []byte("key1")}},
- {Type: clientv3.EventTypePut, Kv: &mvccpb.KeyValue{Key: []byte("key2")}},
- }}
- ch <- clientv3.WatchResponse{
- Events: []*clientv3.Event{
- {
- Type: clientv3.EventTypePut,
- Kv: &mvccpb.KeyValue{ModRevision: 1, Key: []byte("key2")},
- PrevKv: &mvccpb.KeyValue{ModRevision: 1, Key: []byte("key2")},
- },
- }}
- ch <- clientv3.WatchResponse{
- Events: []*clientv3.Event{
- {Type: clientv3.EventTypeDelete, PrevKv: &mvccpb.KeyValue{Key: []byte("key2")}},
- {Type: clientv3.EventTypeDelete, PrevKv: &mvccpb.KeyValue{Key: []byte("key1")}},
- }}
- ch <- clientv3.WatchResponse{Canceled: true}
- close(ch)
- }()
-
- response := keystore.Watch(context.TODO(), "key", true)
-
- ev1 := <-response
- assert.True(t, ev1.IsCreate)
- assert.False(t, ev1.IsModify)
- assert.False(t, ev1.IsDelete)
- assert.Nil(t, ev1.Old)
- assert.EqualValues(t, "key1", ev1.New.Key)
-
- ev2 := <-response
- assert.True(t, ev2.IsCreate)
- assert.False(t, ev2.IsModify)
- assert.False(t, ev2.IsDelete)
- assert.Nil(t, ev2.Old)
- assert.EqualValues(t, "key2", ev2.New.Key)
-
- ev3 := <-response
- assert.False(t, ev3.IsCreate)
- assert.True(t, ev3.IsModify)
- assert.False(t, ev3.IsDelete)
- assert.EqualValues(t, "key2", ev3.New.Key)
- assert.EqualValues(t, "key2", ev3.Old.Key)
-
- ev4 := <-response
- assert.False(t, ev4.IsCreate)
- assert.False(t, ev4.IsModify)
- assert.True(t, ev4.IsDelete)
- assert.Nil(t, ev4.New)
- assert.EqualValues(t, "key2", ev4.Old.Key)
-
- ev5 := <-response
- assert.False(t, ev5.IsCreate)
- assert.False(t, ev5.IsModify)
- assert.True(t, ev5.IsDelete)
- assert.Nil(t, ev5.New)
- assert.EqualValues(t, "key1", ev5.Old.Key)
-
- ev6 := <-response
- assert.Equal(t,
- "etcdserver: mvcc: required revision is a future revision",
- ev6.Err.Error())
-
- // Check channels are closed
- _, ok := <-response
- assert.False(t, ok)
- _, ok = <-ch
- assert.False(t, ok)
-}
diff --git a/internal/pkg/facade/session.go b/internal/pkg/facade/session.go
new file mode 100644
index 00000000..6b4281b0
--- /dev/null
+++ b/internal/pkg/facade/session.go
@@ -0,0 +1,42 @@
+package facade
+
+import "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+
+// Each volume has an associated primary brick
+// that is responsible for responding to any actions
+// All the calls block until they are complete, or an error occurs
+type Session interface {
+ // Allocates storage and creates the session record
+ CreateSession(session datamodel.Session) error
+
+ // Deletes the requested volume and session allocation
+ // If hurry is set, no stage-out is attempted
+ // Unmount is always attempted before deleting the buffer
+ DeleteSession(sessionName datamodel.SessionName, hurry bool) error
+
+ // Update the session and trigger requested data copy in
+ CopyDataIn(sessionName datamodel.SessionName) error
+
+ // Update session hosts and attach volumes as needed
+ Mount(sessionName datamodel.SessionName, computeNodes []string, loginNodes []string) error
+
+ // Attempt to detach volumes
+ Unmount(sessionName datamodel.SessionName) error
+
+ // Update the session and trigger requested data copy out
+ CopyDataOut(sessionName datamodel.SessionName) error
+
+ // Get brick availability by pool
+ GetPools() ([]datamodel.PoolInfo, error)
+
+ // Get requested session
+ //
+ // Error if session does not exist
+ GetSession(sessionName datamodel.SessionName) (datamodel.Session, error)
+
+ // Get all sessions
+ GetAllSessions() ([]datamodel.Session, error)
+
+ // Generate ansible test dir
+ GenerateAnsible(sessionName datamodel.SessionName) (string, error)
+}
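+
+// Illustrative call sequence for a job buffer (a sketch of the
+// assumed lifecycle ordering; error handling elided):
+//   err := s.CreateSession(session)
+//   err = s.CopyDataIn(session.Name)
+//   err = s.Mount(session.Name, computeNodes, loginNodes)
+//   // ... job runs ...
+//   err = s.Unmount(session.Name)
+//   err = s.CopyDataOut(session.Name)
+//   err = s.DeleteSession(session.Name, false)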
diff --git a/internal/pkg/facade/session_action_handler.go b/internal/pkg/facade/session_action_handler.go
new file mode 100644
index 00000000..52dd44d0
--- /dev/null
+++ b/internal/pkg/facade/session_action_handler.go
@@ -0,0 +1,8 @@
+package facade
+
+import "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+
+type SessionActionHandler interface {
+ ProcessSessionAction(action datamodel.SessionAction)
+ RestoreSession(session datamodel.Session)
+}
diff --git a/internal/pkg/filesystem/ansible.go b/internal/pkg/filesystem/ansible.go
new file mode 100644
index 00000000..7d455f01
--- /dev/null
+++ b/internal/pkg/filesystem/ansible.go
@@ -0,0 +1,8 @@
+package filesystem
+
+import "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+
+type Ansible interface {
+ // returns the temp dir the environment was created in
+ CreateEnvironment(session datamodel.Session) (string, error)
+}
diff --git a/internal/pkg/filesystem/provider.go b/internal/pkg/filesystem/provider.go
new file mode 100644
index 00000000..5e4ef62c
--- /dev/null
+++ b/internal/pkg/filesystem/provider.go
@@ -0,0 +1,15 @@
+package filesystem
+
+import "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+
+type Provider interface {
+ Create(session datamodel.Session) (datamodel.FilesystemStatus, error)
+ Restore(session datamodel.Session) error
+ Delete(session datamodel.Session) error
+
+ DataCopyIn(session datamodel.Session) error
+ DataCopyOut(session datamodel.Session) error
+
+ Mount(session datamodel.Session, attachments datamodel.AttachmentSessionStatus) error
+ Unmount(session datamodel.Session, attachments datamodel.AttachmentSessionStatus) error
+}
diff --git a/internal/pkg/filesystem_impl/ansible.go b/internal/pkg/filesystem_impl/ansible.go
new file mode 100644
index 00000000..33fc040b
--- /dev/null
+++ b/internal/pkg/filesystem_impl/ansible.go
@@ -0,0 +1,263 @@
+package filesystem_impl
+
+import (
+ "fmt"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/config"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/filesystem"
+ "gopkg.in/yaml.v2"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "time"
+)
+
+type HostInfo struct {
+ MGS string `yaml:"mgs,omitempty"`
+ MDTS map[string]int `yaml:"mdts,omitempty,flow"`
+ OSTS map[string]int `yaml:"osts,omitempty,flow"`
+}
+
+type FSInfo struct {
+ Hosts map[string]HostInfo `yaml:"hosts"`
+ Vars map[string]string `yaml:"vars"`
+}
+
+type FileSystems struct {
+ Children map[string]FSInfo `yaml:"children"`
+}
+
+type Wrapper struct {
+ Dacs FileSystems
+}
+
+func NewAnsible() filesystem.Ansible {
+ return &ansibleImpl{}
+}
+
+type ansibleImpl struct {
+}
+
+func (*ansibleImpl) CreateEnvironment(session datamodel.Session) (string, error) {
+ return setupAnsible(Lustre, session.FilesystemStatus.InternalName, session.AllocatedBricks)
+}
+
+var conf = config.GetFilesystemConfig()
+
+func getInventory(fsType FSType, fsUuid string, allBricks []datamodel.Brick) string {
+ allocationByHost := make(map[datamodel.BrickHostName][]datamodel.BrickAllocation)
+ for i, brick := range allBricks {
+ allocationByHost[brick.BrickHostName] = append(allocationByHost[brick.BrickHostName], datamodel.BrickAllocation{
+ Brick: brick,
+ AllocatedIndex: uint(i),
+ })
+ }
+
+ // If we have more brick allocations than MaxMDTs,
+ // assign at most one MDT per host.
+ // While this may give us fewer MDTs than MaxMDTs,
+ // it helps spread MDTs across network connections.
+ oneMdtPerHost := len(allBricks) > int(conf.MaxMDTs)
+
+ hosts := make(map[string]HostInfo)
+ mgsnode := ""
+ for host, allocations := range allocationByHost {
+ osts := make(map[string]int)
+ for _, allocation := range allocations {
+ osts[allocation.Brick.Device] = int(allocation.AllocatedIndex)
+ }
+
+ mdts := make(map[string]int)
+ if oneMdtPerHost {
+ allocation := allocations[0]
+ mdts[allocation.Brick.Device] = int(allocation.AllocatedIndex)
+ } else {
+ for _, allocation := range allocations {
+ mdts[allocation.Brick.Device] = int(allocation.AllocatedIndex)
+ }
+ }
+
+ hostInfo := HostInfo{MDTS: mdts, OSTS: osts}
+
+ if allocations[0].AllocatedIndex == 0 {
+ if fsType == Lustre {
+ hostInfo.MGS = conf.MGSDevice
+ } else {
+ hostInfo.MGS = allocations[0].Brick.Device
+ }
+ mgsnode = string(host)
+ }
+ hosts[string(host)] = hostInfo
+ }
+
+ // TODO: add attachments?
+
+ fsinfo := FSInfo{
+ Vars: map[string]string{
+ "mgsnode": mgsnode,
+ //"client_port": fmt.Sprintf("%d", volume.ClientPort),
+ "lnet_suffix": conf.LnetSuffix,
+ "mdt_size_mb": fmt.Sprintf("%d", conf.MDTSizeMB),
+ "fs_name": fsUuid,
+ },
+ Hosts: hosts,
+ }
+ fsname := fsUuid
+ data := Wrapper{Dacs: FileSystems{Children: map[string]FSInfo{fsname: fsinfo}}}
+
+ output, err := yaml.Marshal(data)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ return string(output)
+}
+
+func getPlaybook(fsType FSType, fsUuid string) string {
+ role := "lustre"
+ if fsType == BeegFS {
+ role = "beegfs"
+ }
+ return fmt.Sprintf(`---
+- name: Setup FS
+ hosts: %s
+ any_errors_fatal: true
+ become: yes
+ roles:
+ - role: %s
+ vars:
+ fs_name: %s`, fsUuid, role, fsUuid)
+}
+
+func getAnsibleDir(suffix string) string {
+ return path.Join(conf.AnsibleDir, suffix)
+}
+
+func setupAnsible(fsType FSType, internalName string, bricks []datamodel.Brick) (string, error) {
+ if len(bricks) == 0 {
+ log.Panicf("can't create filesystem with no bricks: %s", internalName)
+ }
+
+ dir, err := ioutil.TempDir("", fmt.Sprintf("fs%s_", internalName))
+ if err != nil {
+ return dir, err
+ }
+ log.Println("Using ansible tempdir:", dir)
+
+ inventory := getInventory(fsType, internalName, bricks)
+ tmpInventory := filepath.Join(dir, "inventory")
+ if err := ioutil.WriteFile(tmpInventory, []byte(inventory), 0666); err != nil {
+ return dir, err
+ }
+ log.Println(inventory)
+
+ cmd := exec.Command("cp", "-r", getAnsibleDir("roles"), dir)
+ output, err := cmd.CombinedOutput()
+ log.Println("copy roles", string(output))
+ if err != nil {
+ return dir, err
+ }
+
+ for _, playbook := range []string{"create.yml", "delete.yml", "restore.yml"} {
+ cmd = exec.Command("cp", getAnsibleDir(playbook), dir)
+ output, err = cmd.CombinedOutput()
+ log.Println("copy playbooks", playbook, string(output))
+ if err != nil {
+ return dir, err
+ }
+ }
+
+ cmd = exec.Command("cp", "-r", getAnsibleDir(".venv"), dir)
+ output, err = cmd.CombinedOutput()
+ log.Println("copy venv", string(output))
+ return dir, err
+}
+
+func executeAnsibleSetup(internalName string, bricks []datamodel.Brick, doFormat bool) error {
+ // TODO: restore beegfs support
+ dir, err := setupAnsible(Lustre, internalName, bricks)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := os.RemoveAll(dir); err != nil {
+ log.Printf("error removing %s due to %s\n", dir, err)
+ }
+ }()
+
+ // allow skip format when trying to rebuild
+ if doFormat {
+ formatArgs := "create.yml -i inventory"
+ err = executeAnsiblePlaybook(dir, formatArgs)
+ if err != nil {
+ return fmt.Errorf("error during ansible create: %s", err.Error())
+ }
+ } else {
+ formatArgs := "restore.yml -i inventory"
+ err = executeAnsiblePlaybook(dir, formatArgs)
+ if err != nil {
+ return fmt.Errorf("error during ansible restore: %s", err.Error())
+ }
+ }
+ return nil
+}
+
+func executeAnsibleTeardown(internalName string, bricks []datamodel.Brick) error {
+ dir, err := setupAnsible(Lustre, internalName, bricks)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := os.RemoveAll(dir); err != nil {
+ log.Printf("error removing %s due to %s\n", dir, err)
+ }
+ }()
+
+ formatArgs := "delete.yml -i inventory"
+ err = executeAnsiblePlaybook(dir, formatArgs)
+ if err != nil {
+ return fmt.Errorf("error during server clean: %s", err.Error())
+ }
+ return nil
+}
+
+func executeAnsiblePlaybook(dir string, args string) error {
+ // TODO: downgrade debug log!
+ cmdStr := fmt.Sprintf(`cd %s; . .venv/bin/activate; ansible-playbook %s;`, dir, args)
+ log.Println("Requested ansible:", cmdStr)
+
+ if conf.SkipAnsible {
+ log.Println("Skip as DAC_SKIP_ANSIBLE=True")
+ time.Sleep(time.Millisecond * 200)
+ return nil
+ }
+
+ var err error
+ for i := 1; i <= 3; i++ {
+ log.Println("Attempt", i, "of ansible:", cmdStr)
+ cmd := exec.Command("bash", "-c", cmdStr)
+
+ timer := time.AfterFunc(time.Minute*5, func() {
+ log.Println("Time up, waited more than 5 mins to complete.")
+ if err := cmd.Process.Kill(); err != nil {
+ log.Panicf("error trying to kill process: %s", err.Error())
+ }
+ })
+ output, currentErr := cmd.CombinedOutput()
+ timer.Stop()
+
+ if currentErr == nil {
+ log.Println("Completed ansible run:", cmdStr)
+ log.Println(string(output))
+ return nil
+ } else {
+ log.Println("Error in ansible run:", string(output))
+ err = currentErr
+ time.Sleep(time.Second * 2)
+ }
+ }
+ return err
+}
diff --git a/internal/pkg/filesystem_impl/ansible_test.go b/internal/pkg/filesystem_impl/ansible_test.go
new file mode 100644
index 00000000..30f82796
--- /dev/null
+++ b/internal/pkg/filesystem_impl/ansible_test.go
@@ -0,0 +1,162 @@
+package filesystem_impl
+
+import (
+ "fmt"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestPlugin_GetInventory(t *testing.T) {
+ brickAllocations := []datamodel.Brick{
+ {BrickHostName: "dac1", Device: "nvme1n1"},
+ {BrickHostName: "dac1", Device: "nvme2n1"},
+ {BrickHostName: "dac1", Device: "nvme3n1"},
+ {BrickHostName: "dac2", Device: "nvme2n1"},
+ {BrickHostName: "dac2", Device: "nvme3n1"},
+ }
+ fsUuid := "abcdefgh"
+ result := getInventory(BeegFS, fsUuid, brickAllocations)
+ expected := `dacs:
+ children:
+ abcdefgh:
+ hosts:
+ dac1:
+ mgs: nvme1n1
+ mdts: {nvme1n1: 0, nvme2n1: 1, nvme3n1: 2}
+ osts: {nvme1n1: 0, nvme2n1: 1, nvme3n1: 2}
+ dac2:
+ mdts: {nvme2n1: 3, nvme3n1: 4}
+ osts: {nvme2n1: 3, nvme3n1: 4}
+ vars:
+ fs_name: abcdefgh
+ lnet_suffix: ""
+ mdt_size_mb: "20480"
+ mgsnode: dac1
+`
+ assert.Equal(t, expected, result)
+}
+
+func TestPlugin_GetInventory_withNoOstOnOneHost(t *testing.T) {
+ brickAllocations := []datamodel.Brick{
+ {BrickHostName: "dac1", Device: "nvme1n1"},
+ {BrickHostName: "dac2", Device: "nvme2n1"},
+ {BrickHostName: "dac2", Device: "nvme3n1"},
+ }
+ fsUuid := "abcdefgh"
+ result := getInventory(Lustre, fsUuid, brickAllocations)
+ expected := `dacs:
+ children:
+ abcdefgh:
+ hosts:
+ dac1:
+ mgs: sdb
+ mdts: {nvme1n1: 0}
+ osts: {nvme1n1: 0}
+ dac2:
+ mdts: {nvme2n1: 1, nvme3n1: 2}
+ osts: {nvme2n1: 1, nvme3n1: 2}
+ vars:
+ fs_name: abcdefgh
+ lnet_suffix: ""
+ mdt_size_mb: "20480"
+ mgsnode: dac1
+`
+ assert.Equal(t, expected, result)
+}
+
+func TestPlugin_GetPlaybook_beegfs(t *testing.T) {
+ fsUuid := "abcdefgh"
+ result := getPlaybook(BeegFS, fsUuid)
+ assert.Equal(t, `---
+- name: Setup FS
+ hosts: abcdefgh
+ any_errors_fatal: true
+ become: yes
+ roles:
+ - role: beegfs
+ vars:
+ fs_name: abcdefgh`, result)
+}
+
+func TestPlugin_GetPlaybook_lustre(t *testing.T) {
+ fsUuid := "abcdefgh"
+ result := getPlaybook(Lustre, fsUuid)
+ assert.Equal(t, `---
+- name: Setup FS
+ hosts: abcdefgh
+ any_errors_fatal: true
+ become: yes
+ roles:
+ - role: lustre
+ vars:
+ fs_name: abcdefgh`, result)
+}
+
+func TestPlugin_GetInventory_MaxMDT(t *testing.T) {
+ var brickAllocations []datamodel.Brick
+ for i := 1; i <= 26; i = i + 2 {
+ brickAllocations = append(brickAllocations, datamodel.Brick{
+ BrickHostName: datamodel.BrickHostName(fmt.Sprintf("dac%d", i)),
+ Device: "nvme1n1",
+ })
+ brickAllocations = append(brickAllocations, datamodel.Brick{
+ BrickHostName: datamodel.BrickHostName(fmt.Sprintf("dac%d", i)),
+ Device: "nvme2n1",
+ })
+ }
+
+ fsUuid := "abcdefgh"
+ result := getInventory(Lustre, fsUuid, brickAllocations)
+ expected := `dacs:
+ children:
+ abcdefgh:
+ hosts:
+ dac1:
+ mgs: sdb
+ mdts: {nvme1n1: 0}
+ osts: {nvme1n1: 0, nvme2n1: 1}
+ dac3:
+ mdts: {nvme1n1: 2}
+ osts: {nvme1n1: 2, nvme2n1: 3}
+ dac5:
+ mdts: {nvme1n1: 4}
+ osts: {nvme1n1: 4, nvme2n1: 5}
+ dac7:
+ mdts: {nvme1n1: 6}
+ osts: {nvme1n1: 6, nvme2n1: 7}
+ dac9:
+ mdts: {nvme1n1: 8}
+ osts: {nvme1n1: 8, nvme2n1: 9}
+ dac11:
+ mdts: {nvme1n1: 10}
+ osts: {nvme1n1: 10, nvme2n1: 11}
+ dac13:
+ mdts: {nvme1n1: 12}
+ osts: {nvme1n1: 12, nvme2n1: 13}
+ dac15:
+ mdts: {nvme1n1: 14}
+ osts: {nvme1n1: 14, nvme2n1: 15}
+ dac17:
+ mdts: {nvme1n1: 16}
+ osts: {nvme1n1: 16, nvme2n1: 17}
+ dac19:
+ mdts: {nvme1n1: 18}
+ osts: {nvme1n1: 18, nvme2n1: 19}
+ dac21:
+ mdts: {nvme1n1: 20}
+ osts: {nvme1n1: 20, nvme2n1: 21}
+ dac23:
+ mdts: {nvme1n1: 22}
+ osts: {nvme1n1: 22, nvme2n1: 23}
+ dac25:
+ mdts: {nvme1n1: 24}
+ osts: {nvme1n1: 24, nvme2n1: 25}
+ vars:
+ fs_name: abcdefgh
+ lnet_suffix: ""
+ mdt_size_mb: "20480"
+ mgsnode: dac1
+`
+ assert.Equal(t, expected, result)
+}
diff --git a/internal/pkg/filesystem_impl/copy.go b/internal/pkg/filesystem_impl/copy.go
new file mode 100644
index 00000000..848f5a26
--- /dev/null
+++ b/internal/pkg/filesystem_impl/copy.go
@@ -0,0 +1,80 @@
+package filesystem_impl
+
+import (
+ "fmt"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "log"
+ "path"
+ "strings"
+)
+
+func processDataCopy(session datamodel.Session, request datamodel.DataCopyRequest) error {
+ cmd, err := generateDataCopyCmd(session, request)
+ if err != nil {
+ return err
+ }
+ if cmd == "" {
+ log.Println("No files to copy for:", session.Name)
+ return nil
+ }
+
+ log.Printf("Doing copy: %s", cmd)
+
+ // Make sure global dir is setup correctly
+ // TODO: share code with mount better
+ // TODO: Probably should all get setup in fs-ansible really!!
+ mountDir := fmt.Sprintf("/mnt/lustre/%s", session.FilesystemStatus.InternalName)
+ sharedDir := path.Join(mountDir, "/global")
+ if err := mkdir("localhost", sharedDir); err != nil {
+ return err
+ }
+ if err := fixUpOwnership("localhost", session.Owner, session.Group, sharedDir); err != nil {
+ return err
+ }
+
+ // Do the copy
+ return runner.Execute("localhost", cmd)
+}
+
+func generateDataCopyCmd(session datamodel.Session, request datamodel.DataCopyRequest) (string, error) {
+ rsync, err := generateRsyncCmd(session, request)
+ if err != nil || rsync == "" {
+ return "", err
+ }
+
+ if len(session.Paths) < 1 {
+ log.Panicf("trying to do data copy in for session with no paths %+v", session)
+ }
+ var exports []string
+ for name, value := range session.Paths {
+ exports = append(exports, fmt.Sprintf("export %s='%s'", name, value))
+ }
+ exportString := strings.Join(exports, " && ")
+
+ cmd := fmt.Sprintf("sudo -g '#%d' -u '#%d' %s", session.Group, session.Owner, rsync)
+ cmd = fmt.Sprintf("bash -c \"%s && %s\"", exportString, cmd)
+ return cmd, nil
+}
+
+func generateRsyncCmd(session datamodel.Session, request datamodel.DataCopyRequest) (string, error) {
+ if request.Source == "" && request.Destination == "" {
+ return "", nil
+ }
+
+ var flags string
+ if request.SourceType == datamodel.Directory {
+ flags = "-r -ospgu --stats"
+ } else if request.SourceType == datamodel.File {
+ flags = "-ospgu --stats"
+ } else {
+ return "", fmt.Errorf("unsupported source type %s for volume: %s", request.SourceType, session.Name)
+ }
+
+ return fmt.Sprintf("rsync %s %s %s", flags,
+ escapePath(request.Source),
+ escapePath(request.Destination)), nil
+}
+
+func escapePath(path string) string {
+ return strings.Replace(path, "$DW_JOB_STRIPED", "\\$DW_JOB_STRIPED", 1)
+}
diff --git a/internal/pkg/pfsprovider/ansible/copy_test.go b/internal/pkg/filesystem_impl/copy_test.go
similarity index 54%
rename from internal/pkg/pfsprovider/ansible/copy_test.go
rename to internal/pkg/filesystem_impl/copy_test.go
index 71aa8d57..5f6d14d5 100644
--- a/internal/pkg/pfsprovider/ansible/copy_test.go
+++ b/internal/pkg/filesystem_impl/copy_test.go
@@ -1,64 +1,71 @@
-package ansible
+package filesystem_impl
import (
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
"github.com/stretchr/testify/assert"
"testing"
)
func Test_GenerateDataCopy(t *testing.T) {
- testVolume := registry.Volume{
- Name: registry.VolumeName("asdf"),
- Owner: 1001,
- Group: 1002,
- UUID: "fsuuid",
+ session := datamodel.Session{
+ Name: "asdf",
+ Owner: 1001,
+ Group: 1002,
+ FilesystemStatus: datamodel.FilesystemStatus{InternalName: "fsuuid"},
+ Paths: map[string]string{
+ "DW_JOB_STRIPED": "/mnt/lustre/fsuuid/global",
+ },
}
- request := registry.DataCopyRequest{}
+ request := datamodel.DataCopyRequest{}
- cmd, err := generateDataCopyCmd(testVolume, request)
+ cmd, err := generateDataCopyCmd(session, request)
assert.Nil(t, err)
assert.Empty(t, cmd)
- request.SourceType = registry.File
+ request.SourceType = datamodel.File
request.Source = "$DW_JOB_STRIPED/source"
request.Destination = "dest"
- cmd, err = generateDataCopyCmd(testVolume, request)
+ cmd, err = generateDataCopyCmd(session, request)
assert.Nil(t, err)
assert.Equal(t, "bash -c \"export DW_JOB_STRIPED='/mnt/lustre/fsuuid/global' && sudo -g '#1002' -u '#1001' rsync -ospgu --stats \\$DW_JOB_STRIPED/source dest\"", cmd)
- request.SourceType = registry.List
+ cmd, err = generateDataCopyCmd(session, request)
+ assert.Nil(t, err)
+ assert.Equal(t, "bash -c \"export DW_JOB_STRIPED='/mnt/lustre/fsuuid/global' && sudo -g '#1002' -u '#1001' rsync -ospgu --stats \\$DW_JOB_STRIPED/source dest\"", cmd)
+
+ request.SourceType = datamodel.List
request.Source = "list_filename"
- cmd, err = generateDataCopyCmd(testVolume, request)
+ cmd, err = generateDataCopyCmd(session, request)
assert.Equal(t, "", cmd)
assert.Equal(t, "unsupported source type list for volume: asdf", err.Error())
}
func Test_GenerateRsyncCmd(t *testing.T) {
- testVolume := registry.Volume{
- Name: registry.VolumeName("asdf"),
+ testVolume := datamodel.Session{
+ Name: "asdf",
}
- request := registry.DataCopyRequest{}
+ request := datamodel.DataCopyRequest{}
cmd, err := generateRsyncCmd(testVolume, request)
assert.Nil(t, err)
assert.Empty(t, cmd)
- request.SourceType = registry.File
+ request.SourceType = datamodel.File
request.Source = "source"
request.Destination = "dest"
cmd, err = generateRsyncCmd(testVolume, request)
assert.Nil(t, err)
assert.Equal(t, "rsync -ospgu --stats source dest", cmd)
- request.SourceType = registry.Directory
+ request.SourceType = datamodel.Directory
request.Source = "source"
request.Destination = "dest"
cmd, err = generateRsyncCmd(testVolume, request)
assert.Nil(t, err)
assert.Equal(t, "rsync -r -ospgu --stats source dest", cmd)
- request.SourceType = registry.List
+ request.SourceType = datamodel.List
request.Source = "list_filename"
cmd, err = generateRsyncCmd(testVolume, request)
assert.Equal(t, "", cmd)
diff --git a/internal/pkg/filesystem_impl/fs_type.go b/internal/pkg/filesystem_impl/fs_type.go
new file mode 100644
index 00000000..069154d8
--- /dev/null
+++ b/internal/pkg/filesystem_impl/fs_type.go
@@ -0,0 +1,44 @@
+package filesystem_impl
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+type FSType int
+
+const (
+ BeegFS FSType = iota
+ Lustre
+)
+
+var fsTypeStrings = map[FSType]string{
+ BeegFS: "BeegFS",
+ Lustre: "Lustre",
+}
+var stringToFSType = map[string]FSType{
+ "": BeegFS,
+ "BeegFS": BeegFS,
+ "Lustre": Lustre,
+}
+
+func (fsType FSType) String() string {
+ return fsTypeStrings[fsType]
+}
+
+func (fsType FSType) MarshalJSON() ([]byte, error) {
+ buffer := bytes.NewBufferString(`"`)
+ buffer.WriteString(fsTypeStrings[fsType])
+ buffer.WriteString(`"`)
+ return buffer.Bytes(), nil
+}
+
+func (fsType *FSType) UnmarshalJSON(b []byte) error {
+ var str string
+ err := json.Unmarshal(b, &str)
+ if err != nil {
+ return err
+ }
+ *fsType = stringToFSType[str]
+ return nil
+}
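+
+// For example (illustrative round trip):
+//   data, _ := json.Marshal(Lustre)       // data == []byte(`"Lustre"`)
+//   var fs FSType
+//   _ = json.Unmarshal([]byte(`""`), &fs) // fs == BeegFS, the default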
diff --git a/internal/pkg/pfsprovider/ansible/mount.go b/internal/pkg/filesystem_impl/mount.go
similarity index 54%
rename from internal/pkg/pfsprovider/ansible/mount.go
rename to internal/pkg/filesystem_impl/mount.go
index ce3cd0b0..e7368c7b 100644
--- a/internal/pkg/pfsprovider/ansible/mount.go
+++ b/internal/pkg/filesystem_impl/mount.go
@@ -1,120 +1,90 @@
-package ansible
+package filesystem_impl
import (
"fmt"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
"log"
- "os"
"os/exec"
"path"
- "strconv"
"time"
)
-func getMountDir(volume registry.Volume, jobName string) string {
+func getMountDir(sourceName datamodel.SessionName, isMultiJob bool, attachingForSession datamodel.SessionName) string {
// TODO: what about the environment variables that are being set? should share logic with here
- if volume.MultiJob {
- return fmt.Sprintf("/dac/%s_persistent_%s", jobName, volume.Name)
+ if isMultiJob {
+ return fmt.Sprintf("/dac/%s_persistent_%s", attachingForSession, sourceName)
}
- return fmt.Sprintf("/dac/%s_job", jobName)
+ return fmt.Sprintf("/dac/%s_job", sourceName)
}
-func getLnetSuffix() string {
- return os.Getenv("DAC_LNET_SUFFIX")
-}
-
-func getMdtSizeMB() uint {
- mdtSizeGB, err := strconv.ParseUint(os.Getenv("DAC_MDT_SIZE_GB"), 10, 32)
- if err == nil && mdtSizeGB > 0 {
- return uint(mdtSizeGB * 1024)
- }
- mdtSizeMB, err := strconv.ParseUint(os.Getenv("DAC_MDT_SIZE_MB"), 10, 32)
- if err == nil && mdtSizeMB > 0 {
- return uint(mdtSizeMB)
- }
- return uint(20 * 1024)
-}
-
-func mount(fsType FSType, volume registry.Volume, brickAllocations []registry.BrickAllocation, attachments []registry.Attachment) error {
- log.Println("Mount for:", volume.Name)
- var primaryBrickHost string
- for _, allocation := range brickAllocations {
- if allocation.AllocatedIndex == 0 {
- primaryBrickHost = allocation.Hostname
- break
- }
- }
+func mount(fsType FSType, sessionName datamodel.SessionName, isMultiJob bool, internalName string,
+ primaryBrickHost datamodel.BrickHostName, attachment datamodel.AttachmentSessionStatus,
+ owner uint, group uint) error {
+ log.Println("Mount for:", sessionName)
if primaryBrickHost == "" {
- log.Panicf("failed to find primary brick for volume: %s", volume.Name)
+ log.Panicf("failed to find primary brick for volume: %s", sessionName)
}
- lnetSuffix := getLnetSuffix()
-
if fsType == BeegFS {
// Write out the config needed, and do the mount using ansible
// TODO: Move Lustre mount here that is done below
- executeAnsibleMount(fsType, volume, brickAllocations)
+ //executeAnsibleMount(fsType, volume, brickAllocations)
}
- for _, attachment := range attachments {
- if attachment.State != registry.RequestAttach {
- log.Printf("Skipping volume %s attach: %+v", volume.Name, attachment)
- continue
- }
- log.Printf("Volume %s attaching with: %+v", volume.Name, attachment)
+ for _, attachHost := range attachment.AttachmentSession.Hosts {
+ log.Printf("Mounting %s on host: %s for session: %s", sessionName, attachHost,
+ attachment.AttachmentSession.SessionName)
- var mountDir = getMountDir(volume, attachment.Job)
- if err := mkdir(attachment.Hostname, mountDir); err != nil {
+ var mountDir = getMountDir(sessionName, isMultiJob, attachment.AttachmentSession.SessionName)
+ if err := mkdir(attachHost, mountDir); err != nil {
return err
}
- if err := mountRemoteFilesystem(fsType, attachment.Hostname, lnetSuffix,
- primaryBrickHost, volume.UUID, mountDir); err != nil {
+ if err := mountRemoteFilesystem(fsType, attachHost, conf.LnetSuffix,
+ string(primaryBrickHost), internalName, mountDir); err != nil {
return err
}
-
- if !volume.MultiJob && volume.AttachAsSwapBytes > 0 {
- swapDir := path.Join(mountDir, "/swap")
- if err := mkdir(attachment.Hostname, swapDir); err != nil {
- return err
- }
- if err := fixUpOwnership(attachment.Hostname, 0, 0, swapDir); err != nil {
- return err
- }
-
- swapSizeMB := int(volume.AttachAsSwapBytes / (1024 * 1024))
- swapFile := path.Join(swapDir, fmt.Sprintf("/%s", attachment.Hostname))
- loopback := fmt.Sprintf("/dev/loop%d", volume.ClientPort)
- if err := createSwap(attachment.Hostname, swapSizeMB, swapFile, loopback); err != nil {
- return err
- }
-
- if err := swapOn(attachment.Hostname, loopback); err != nil {
- return err
- }
- }
-
- if !volume.MultiJob && volume.AttachPrivateNamespace {
- privateDir := path.Join(mountDir, fmt.Sprintf("/private/%s", attachment.Hostname))
- if err := mkdir(attachment.Hostname, privateDir); err != nil {
+ // TODO: swap!
+ //if !volume.MultiJob && volume.AttachAsSwapBytes > 0 {
+ // swapDir := path.Join(mountDir, "/swap")
+ // if err := mkdir(attachment.Hostname, swapDir); err != nil {
+ // return err
+ // }
+ // if err := fixUpOwnership(attachment.Hostname, 0, 0, swapDir); err != nil {
+ // return err
+ // }
+ // swapSizeMB := int(volume.AttachAsSwapBytes / (1024 * 1024))
+ // swapFile := path.Join(swapDir, fmt.Sprintf("/%s", attachment.Hostname))
+ // loopback := fmt.Sprintf("/dev/loop%d", volume.ClientPort)
+ // if err := createSwap(attachment.Hostname, swapSizeMB, swapFile, loopback); err != nil {
+ // return err
+ // }
+ // if err := swapOn(attachment.Hostname, loopback); err != nil {
+ // return err
+ // }
+ //}
+
+ if attachment.PrivateMount {
+ privateDir := path.Join(mountDir, fmt.Sprintf("/private/%s", attachHost))
+ if err := mkdir(attachHost, privateDir); err != nil {
return err
}
- if err := fixUpOwnership(attachment.Hostname, volume.Owner, volume.Group, privateDir); err != nil {
+ if err := fixUpOwnership(attachHost, owner, group, privateDir); err != nil {
return err
}
// need a consistent symlink for shared environment variables across all hosts
- privateSymLinkDir := fmt.Sprintf("/dac/%s_job_private", attachment.Job)
- if err := createSymbolicLink(attachment.Hostname, privateDir, privateSymLinkDir); err != nil {
+ privateSymLinkDir := fmt.Sprintf("/dac/%s_job_private", sessionName)
+ if err := createSymbolicLink(attachHost, privateDir, privateSymLinkDir); err != nil {
return err
}
}
sharedDir := path.Join(mountDir, "/global")
- if err := mkdir(attachment.Hostname, sharedDir); err != nil {
+ if err := mkdir(attachHost, sharedDir); err != nil {
return err
}
- if err := fixUpOwnership(attachment.Hostname, volume.Owner, volume.Group, sharedDir); err != nil {
+ if err := fixUpOwnership(attachHost, owner, group, sharedDir); err != nil {
return err
}
}
@@ -123,55 +93,51 @@ func mount(fsType FSType, volume registry.Volume, brickAllocations []registry.Br
return nil
}
-func umount(fsType FSType, volume registry.Volume, brickAllocations []registry.BrickAllocation, attachments []registry.Attachment) error {
- log.Println("Umount for:", volume.Name)
-
- for _, attachment := range attachments {
- if attachment.State != registry.RequestDetach {
- log.Printf("Skipping volume %s detach for: %+v", volume.Name, attachment)
- continue
- }
- log.Printf("Volume %s dettaching: %+v", volume.Name, attachment)
-
- var mountDir = getMountDir(volume, attachment.Job)
- if !volume.MultiJob && volume.AttachAsSwapBytes > 0 {
- swapFile := path.Join(mountDir, fmt.Sprintf("/swap/%s", attachment.Hostname)) // TODO share?
- loopback := fmt.Sprintf("/dev/loop%d", volume.ClientPort) // TODO share?
- if err := swapOff(attachment.Hostname, loopback); err != nil {
- log.Printf("Warn: failed to swap off %+v", attachment)
- }
- if err := detachLoopback(attachment.Hostname, loopback); err != nil {
- log.Printf("Warn: failed to detach loopback %+v", attachment)
- }
- if err := removeSubtree(attachment.Hostname, swapFile); err != nil {
- return err
- }
- }
-
- if !volume.MultiJob && volume.AttachPrivateNamespace {
- privateSymLinkDir := fmt.Sprintf("/dac/%s_job_private", attachment.Job)
- if err := removeSubtree(attachment.Hostname, privateSymLinkDir); err != nil {
+func unmount(fsType FSType, sessionName datamodel.SessionName, isMultiJob bool, internalName string,
+ primaryBrickHost datamodel.BrickHostName, attachment datamodel.AttachmentSessionStatus) error {
+ log.Println("Unmount for:", sessionName)
+
+ for _, attachHost := range attachment.AttachmentSession.Hosts {
+ log.Printf("Unmounting %s on host: %s for session: %s", sessionName, attachHost,
+ attachment.AttachmentSession.SessionName)
+
+ var mountDir = getMountDir(sessionName, isMultiJob, attachment.AttachmentSession.SessionName)
+ // TODO: swap!
+ //if !volume.MultiJob && volume.AttachAsSwapBytes > 0 {
+ // swapFile := path.Join(mountDir, fmt.Sprintf("/swap/%s", attachment.Hostname)) // TODO share?
+ // loopback := fmt.Sprintf("/dev/loop%d", volume.ClientPort) // TODO share?
+ // if err := swapOff(attachment.Hostname, loopback); err != nil {
+ // log.Printf("Warn: failed to swap off %+v", attachment)
+ // }
+ // if err := detachLoopback(attachment.Hostname, loopback); err != nil {
+ // log.Printf("Warn: failed to detach loopback %+v", attachment)
+ // }
+ // if err := removeSubtree(attachment.Hostname, swapFile); err != nil {
+ // return err
+ // }
+ //}
+ if attachment.PrivateMount {
+ privateSymLinkDir := fmt.Sprintf("/dac/%s_job_private", sessionName)
+ if err := removeSubtree(attachHost, privateSymLinkDir); err != nil {
return err
}
}
if fsType == Lustre {
- if err := umountLustre(attachment.Hostname, mountDir); err != nil {
+ if err := umountLustre(attachHost, mountDir); err != nil {
return err
}
- if err := removeSubtree(attachment.Hostname, mountDir); err != nil {
+ if err := removeSubtree(attachHost, mountDir); err != nil {
return err
}
-
}
}
if fsType == BeegFS {
  // TODO: Move the Lustre unmount that currently happens above into here
- executeAnsibleUnmount(fsType, volume, brickAllocations)
+ // executeAnsibleUnmount(fsType, volume, brickAllocations)
// TODO: this makes copy out much harder in its current form :(
}
-
return nil
}
@@ -274,11 +240,11 @@ type Run interface {
type run struct {
}
+// TODO: need some code sharing here!!!
func (*run) Execute(hostname string, cmdStr string) error {
log.Println("SSH to:", hostname, "with command:", cmdStr)
- skipAnsible := os.Getenv("DAC_SKIP_ANSIBLE")
- if skipAnsible == "True" {
+ if conf.SkipAnsible {
log.Println("Skip as DAC_SKIP_ANSIBLE=True")
time.Sleep(time.Millisecond * 200)
return nil
@@ -289,7 +255,9 @@ func (*run) Execute(hostname string, cmdStr string) error {
timer := time.AfterFunc(time.Minute, func() {
log.Println("Time up, waited more than 5 mins to complete.")
- cmd.Process.Kill()
+ if err := cmd.Process.Kill(); err != nil {
+ log.Panicf("error trying to kill process: %s", err.Error())
+ }
})
output, err := cmd.CombinedOutput()
diff --git a/internal/pkg/filesystem_impl/mount_test.go b/internal/pkg/filesystem_impl/mount_test.go
new file mode 100644
index 00000000..8ca775ee
--- /dev/null
+++ b/internal/pkg/filesystem_impl/mount_test.go
@@ -0,0 +1,235 @@
+package filesystem_impl
+
+import (
+ "errors"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
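+// fakeRunner is a hand-rolled test double for the package-level runner in
+// mount.go: it records every (hostname, command) pair so the tests below can
+// assert the exact shell commands that would have been issued over SSH.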
+type fakeRunner struct {
+ err error
+ calls int
+ hostnames []string
+ cmdStrs []string
+}
+
+func (f *fakeRunner) Execute(hostname string, cmdStr string) error {
+ f.calls++
+ f.hostnames = append(f.hostnames, hostname)
+ f.cmdStrs = append(f.cmdStrs, cmdStr)
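+ // Failing this one specific mtab grep simulates "not mounted yet", which
+ // forces mountLustre down the mount path; any other grep succeeds and is
+ // treated as already mounted (see Test_Mount_multi, where mount is skipped).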
+ if cmdStr == "grep /dac/job1_job /etc/mtab" {
+ return errors.New("trigger mount")
+ }
+ return f.err
+}
+
+func Test_mkdir(t *testing.T) {
+ defer func() { runner = &run{} }()
+ fake := &fakeRunner{}
+ runner = fake
+
+ err := mkdir("host", "dir")
+ assert.Nil(t, err)
+ assert.Equal(t, "host", fake.hostnames[0])
+ assert.Equal(t, "mkdir -p dir", fake.cmdStrs[0])
+
+ runner = &fakeRunner{err: errors.New("expected")}
+ err = mkdir("", "")
+ assert.Equal(t, "expected", err.Error())
+}
+
+func Test_mountLustre(t *testing.T) {
+ defer func() { runner = &run{} }()
+ fake := &fakeRunner{}
+ runner = fake
+
+ err := mountLustre("host", "-opa@o2ib1", "mgt", "fs", "/dac/job1_job")
+ assert.Nil(t, err)
+ assert.Equal(t, 2, fake.calls)
+ assert.Equal(t, "host", fake.hostnames[0])
+ assert.Equal(t, "host", fake.hostnames[1])
+ assert.Equal(t, "grep /dac/job1_job /etc/mtab", fake.cmdStrs[0])
+ assert.Equal(t, "mount -t lustre -o flock,nodev,nosuid mgt-opa@o2ib1:/fs /dac/job1_job", fake.cmdStrs[1])
+
+ fake = &fakeRunner{err: errors.New("expected")}
+ runner = fake
+ err = mountRemoteFilesystem(Lustre, "host", "", "mgt", "fs", "asdf")
+ assert.Equal(t, "expected", err.Error())
+ assert.Equal(t, 2, fake.calls)
+ assert.Equal(t, "grep asdf /etc/mtab", fake.cmdStrs[0])
+ assert.Equal(t, "mount -t lustre -o flock,nodev,nosuid mgt:/fs asdf", fake.cmdStrs[1])
+}
+
+func Test_createSwap(t *testing.T) {
+ defer func() { runner = &run{} }()
+ fake := &fakeRunner{}
+ runner = fake
+
+ err := createSwap("host", 3, "file", "loopback")
+ assert.Nil(t, err)
+ assert.Equal(t, "host", fake.hostnames[0])
+ assert.Equal(t, "host", fake.hostnames[1])
+ assert.Equal(t, "host", fake.hostnames[2])
+ assert.Equal(t, 4, len(fake.cmdStrs))
+ assert.Equal(t, "dd if=/dev/zero of=file bs=1024 count=3072", fake.cmdStrs[0])
+ assert.Equal(t, "chmod 0600 file", fake.cmdStrs[1])
+ assert.Equal(t, "losetup loopback file", fake.cmdStrs[2])
+ assert.Equal(t, "mkswap loopback", fake.cmdStrs[3])
+}
+
+func Test_fixUpOwnership(t *testing.T) {
+ defer func() { runner = &run{} }()
+ fake := &fakeRunner{}
+ runner = fake
+
+ err := fixUpOwnership("host", 10, 11, "dir")
+ assert.Nil(t, err)
+
+ assert.Equal(t, 2, fake.calls)
+ assert.Equal(t, "host", fake.hostnames[0])
+ assert.Equal(t, "chown 10:11 dir", fake.cmdStrs[0])
+ assert.Equal(t, "host", fake.hostnames[1])
+ assert.Equal(t, "chmod 770 dir", fake.cmdStrs[1])
+}
+
+func Test_Mount(t *testing.T) {
+ defer func() { runner = &run{} }()
+ fake := &fakeRunner{}
+ runner = fake
+
+ sessionName := datamodel.SessionName("job1")
+ internalName := "fsuuid"
+ primaryBrickHost := datamodel.BrickHostName("host1")
+ owner := uint(1001)
+ group := uint(1002)
+ attachment := datamodel.AttachmentSessionStatus{
+ AttachmentSession: datamodel.AttachmentSession{
+ SessionName: "job2", // changed to prove this is not used
+ Hosts: []string{"client1", "client2"},
+ },
+ GlobalMount: true,
+ PrivateMount: true,
+ SwapBytes: 1024 * 1024, // 1 MiB
+ }
+ err := mount(Lustre, sessionName, false,
+ internalName, primaryBrickHost, attachment,
+ owner, group)
+ assert.Nil(t, err)
+ assert.Equal(t, 20, fake.calls)
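+ // 10 commands per host over 2 hosts: mkdir, grep and mount for the
+ // filesystem, mkdir/chown/chmod plus a symlink for the private dir, and
+ // mkdir/chown/chmod for the global dir.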
+
+ assert.Equal(t, "client1", fake.hostnames[0])
+ assert.Equal(t, "mkdir -p /dac/job1_job", fake.cmdStrs[0])
+ assert.Equal(t, "grep /dac/job1_job /etc/mtab", fake.cmdStrs[1])
+ assert.Equal(t, "mount -t lustre -o flock,nodev,nosuid host1:/fsuuid /dac/job1_job", fake.cmdStrs[2])
+
+ assert.Equal(t, "mkdir -p /dac/job1_job/private/client1", fake.cmdStrs[3])
+ assert.Equal(t, "chown 1001:1002 /dac/job1_job/private/client1", fake.cmdStrs[4])
+ assert.Equal(t, "chmod 770 /dac/job1_job/private/client1", fake.cmdStrs[5])
+ assert.Equal(t, "ln -s /dac/job1_job/private/client1 /dac/job1_job_private", fake.cmdStrs[6])
+
+ assert.Equal(t, "mkdir -p /dac/job1_job/global", fake.cmdStrs[7])
+ assert.Equal(t, "chown 1001:1002 /dac/job1_job/global", fake.cmdStrs[8])
+ assert.Equal(t, "chmod 770 /dac/job1_job/global", fake.cmdStrs[9])
+
+ assert.Equal(t, "client2", fake.hostnames[10])
+ assert.Equal(t, "mkdir -p /dac/job1_job", fake.cmdStrs[10])
+
+ assert.Equal(t, "client2", fake.hostnames[19])
+ assert.Equal(t, "chmod 770 /dac/job1_job/global", fake.cmdStrs[19])
+}
+
+func Test_Umount(t *testing.T) {
+ defer func() { runner = &run{} }()
+ fake := &fakeRunner{}
+ runner = fake
+
+ sessionName := datamodel.SessionName("job4")
+ internalName := "fsuuid"
+ primaryBrickHost := datamodel.BrickHostName("host1")
+ attachment := datamodel.AttachmentSessionStatus{
+ AttachmentSession: datamodel.AttachmentSession{
+ SessionName: "job2",
+ Hosts: []string{"client1", "client2"},
+ },
+ GlobalMount: true,
+ PrivateMount: true,
+ SwapBytes: 1024 * 1024, // 1 MiB
+ }
+ err := unmount(Lustre, sessionName, false,
+ internalName, primaryBrickHost, attachment)
+ assert.Nil(t, err)
+ assert.Equal(t, 8, fake.calls)
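+ // 4 commands per host over 2 hosts: remove the private symlink, grep
+ // /etc/mtab, umount, then remove the mount directory.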
+
+ assert.Equal(t, "client1", fake.hostnames[0])
+ assert.Equal(t, "rm -rf /dac/job4_job_private", fake.cmdStrs[0])
+ assert.Equal(t, "grep /dac/job4_job /etc/mtab", fake.cmdStrs[1])
+ assert.Equal(t, "umount /dac/job4_job", fake.cmdStrs[2])
+ assert.Equal(t, "rm -rf /dac/job4_job", fake.cmdStrs[3])
+
+ assert.Equal(t, "client2", fake.hostnames[7])
+ assert.Equal(t, "rm -rf /dac/job4_job", fake.cmdStrs[7])
+}
+
+func Test_Umount_multi(t *testing.T) {
+ defer func() { runner = &run{} }()
+ fake := &fakeRunner{}
+ runner = fake
+
+ sessionName := datamodel.SessionName("asdf")
+ internalName := "uuidasdf"
+ primaryBrickHost := datamodel.BrickHostName("host1")
+ attachment := datamodel.AttachmentSessionStatus{
+ AttachmentSession: datamodel.AttachmentSession{
+ SessionName: "job1",
+ Hosts: []string{"client1"},
+ },
+ GlobalMount: true,
+ PrivateMount: false,
+ SwapBytes: 0,
+ }
+ err := unmount(Lustre, sessionName, true,
+ internalName, primaryBrickHost, attachment)
+
+ assert.Nil(t, err)
+ assert.Equal(t, 3, fake.calls)
+
+ assert.Equal(t, "client1", fake.hostnames[0])
+ assert.Equal(t, "grep /dac/job1_persistent_asdf /etc/mtab", fake.cmdStrs[0])
+ assert.Equal(t, "umount /dac/job1_persistent_asdf", fake.cmdStrs[1])
+ assert.Equal(t, "rm -rf /dac/job1_persistent_asdf", fake.cmdStrs[2])
+}
+
+func Test_Mount_multi(t *testing.T) {
+ defer func() { runner = &run{} }()
+ fake := &fakeRunner{}
+ runner = fake
+
+ sessionName := datamodel.SessionName("asdf")
+ internalName := "uuidasdf"
+ primaryBrickHost := datamodel.BrickHostName("host1")
+ owner := uint(1001)
+ group := uint(1002)
+ attachment := datamodel.AttachmentSessionStatus{
+ AttachmentSession: datamodel.AttachmentSession{
+ SessionName: "job1",
+ Hosts: []string{"client1"},
+ },
+ GlobalMount: true,
+ PrivateMount: false,
+ SwapBytes: 0,
+ }
+ err := mount(Lustre, sessionName, true,
+ internalName, primaryBrickHost, attachment,
+ owner, group)
+
+ assert.Nil(t, err)
+ assert.Equal(t, 5, fake.calls)
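+ // Only 5 calls: the fake's grep succeeds for this path, so the filesystem
+ // is treated as already mounted and no mount command is issued, and with
+ // PrivateMount false only the global directory is created and fixed up.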
+
+ assert.Equal(t, "client1", fake.hostnames[0])
+ assert.Equal(t, "mkdir -p /dac/job1_persistent_asdf", fake.cmdStrs[0])
+ assert.Equal(t, "grep /dac/job1_persistent_asdf /etc/mtab", fake.cmdStrs[1])
+ assert.Equal(t, "mkdir -p /dac/job1_persistent_asdf/global", fake.cmdStrs[2])
+ assert.Equal(t, "chown 1001:1002 /dac/job1_persistent_asdf/global", fake.cmdStrs[3])
+ assert.Equal(t, "chmod 770 /dac/job1_persistent_asdf/global", fake.cmdStrs[4])
+}
diff --git a/internal/pkg/filesystem_impl/provider.go b/internal/pkg/filesystem_impl/provider.go
new file mode 100644
index 00000000..9b95c851
--- /dev/null
+++ b/internal/pkg/filesystem_impl/provider.go
@@ -0,0 +1,79 @@
+package filesystem_impl
+
+import (
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/filesystem"
+ "math/rand"
+ "time"
+)
+
+func NewFileSystemProvider(ansible filesystem.Ansible) filesystem.Provider {
+ return &fileSystemProvider{ansible: ansible}
+}
+
+type fileSystemProvider struct {
+ ansible filesystem.Ansible
+ // TODO: proper config object
+}
+
+const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
+var source = rand.NewSource(time.Now().UnixNano())
+var randGenerator = rand.New(source)
+
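+// Note: despite the name this is not an RFC 4122 UUID, but an 8-character
+// random string of ASCII letters from math/rand, seeded with the current
+// time; readable in filesystem names, but not cryptographically unique.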
+func GetNewUUID() string {
+ b := make([]byte, 8)
+ for i := range b {
+ b[i] = letters[randGenerator.Int63()%int64(len(letters))]
+ }
+ return string(b)
+}
+
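+// Create stamps the session with a fresh internal filesystem name before
+// running the ansible setup; the trailing bool presumably marks a first-time
+// create (format), while Restore below re-runs setup against the stored
+// InternalName with false.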
+func (f *fileSystemProvider) Create(session datamodel.Session) (datamodel.FilesystemStatus, error) {
+ session.FilesystemStatus = datamodel.FilesystemStatus{
+ InternalName: GetNewUUID(),
+ InternalData: "",
+ }
+ err := executeAnsibleSetup(session.FilesystemStatus.InternalName, session.AllocatedBricks, true)
+ return session.FilesystemStatus, err
+}
+
+func (f *fileSystemProvider) Restore(session datamodel.Session) error {
+ return executeAnsibleSetup(session.FilesystemStatus.InternalName, session.AllocatedBricks, false)
+}
+
+func (f *fileSystemProvider) Delete(session datamodel.Session) error {
+ return executeAnsibleTeardown(session.FilesystemStatus.InternalName, session.AllocatedBricks)
+}
+
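+// DataCopyIn and DataCopyOut run the session's stage-in and stage-out
+// requests sequentially, stopping at the first error.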
+func (f *fileSystemProvider) DataCopyIn(session datamodel.Session) error {
+ for _, dataCopy := range session.StageInRequests {
+ err := processDataCopy(session, dataCopy)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (f *fileSystemProvider) DataCopyOut(session datamodel.Session) error {
+ for _, dataCopy := range session.StageOutRequests {
+ err := processDataCopy(session, dataCopy)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (f *fileSystemProvider) Mount(session datamodel.Session, attachments datamodel.AttachmentSessionStatus) error {
+ return mount(Lustre, session.Name, session.VolumeRequest.MultiJob, session.FilesystemStatus.InternalName,
+ session.PrimaryBrickHost, attachments, session.Owner, session.Group)
+}
+
+func (f *fileSystemProvider) Unmount(session datamodel.Session, attachments datamodel.AttachmentSessionStatus) error {
+ return unmount(Lustre, session.Name, session.VolumeRequest.MultiJob, session.FilesystemStatus.InternalName,
+ session.PrimaryBrickHost, attachments)
+}
diff --git a/internal/pkg/keystoreregistry/pool.go b/internal/pkg/keystoreregistry/pool.go
deleted file mode 100644
index 541b43e8..00000000
--- a/internal/pkg/keystoreregistry/pool.go
+++ /dev/null
@@ -1,421 +0,0 @@
-package keystoreregistry
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "log"
- "math/rand"
- "strings"
- "time"
-)
-
-func NewPoolRegistry(keystore Keystore) registry.PoolRegistry {
- return &poolRegistry{keystore}
-}
-
-type poolRegistry struct {
- keystore Keystore
-}
-
-const registeredBricksPrefix = "/bricks/registered/"
-
-func getBrickInfoKey(hostname string, device string) string {
- return fmt.Sprintf("%s%s/%s/", registeredBricksPrefix, hostname, device)
-}
-
-const allocatedBricksPrefix = "/bricks/allocated/host/"
-
-func getPrefixAllocationHost(hostname string) string {
- return fmt.Sprintf("%s%s/", allocatedBricksPrefix, hostname)
-}
-
-func getBrickAllocationKeyHost(allocation registry.BrickAllocation) string {
- prefix := getPrefixAllocationHost(allocation.Hostname)
- return fmt.Sprintf("%s%s", prefix, allocation.Device)
-}
-
-func getPrefixAllocationVolume(volume registry.VolumeName) string {
- return fmt.Sprintf("/bricks/allocated/volume/%s/", volume)
-}
-func getBrickAllocationKeyVolume(allocation registry.BrickAllocation) string {
- prefix := getPrefixAllocationVolume(allocation.AllocatedVolume)
- return fmt.Sprintf("%s%d/%s/%s",
- prefix, allocation.AllocatedIndex, allocation.Hostname, allocation.Device)
-}
-
-func (poolRegistry *poolRegistry) UpdateHost(bricks []registry.BrickInfo) error {
- var values []KeyValueVersion
- var problems []string
- var hostname string
- for _, brickInfo := range bricks {
- if hostname == "" {
- hostname = brickInfo.Hostname
- }
- if hostname != brickInfo.Hostname {
- problems = append(problems, "Only one host to be updated at once")
- }
- // TODO: lots more error handing needed, like pool consistency, valid keys for etcd
- values = append(values, KeyValueVersion{
- Key: getBrickInfoKey(brickInfo.Hostname, brickInfo.Device),
- Value: toJson(brickInfo),
- })
- }
- if len(problems) > 0 {
- return fmt.Errorf("can't update host because: %s", strings.Join(problems, ", "))
- }
- return poolRegistry.keystore.Update(values)
-}
-
-func getKeepAliveKey(hostname string) string {
- return fmt.Sprintf("/host/keepalive/%s", hostname)
-}
-
-func (poolRegistry *poolRegistry) KeepAliveHost(hostname string) error {
- return poolRegistry.keystore.KeepAliveKey(getKeepAliveKey(hostname))
-}
-
-func (poolRegistry *poolRegistry) HostAlive(hostname string) (bool, error) {
- keyValue, err := poolRegistry.keystore.Get(getKeepAliveKey(hostname))
- return keyValue.Key != "", err
-}
-
-func (poolRegistry *poolRegistry) AllocateBricksForVolume(volume registry.Volume) ([]registry.BrickAllocation, error) {
- // No bricks requested, so return right away
- if volume.SizeBricks == 0 {
- return nil, nil
- }
-
- // TODO: would retries on clashes be better? this seems simpler for now
- // lock the pool to stop races
- mutex, err := poolRegistry.keystore.NewMutex(fmt.Sprintf("allocation/%s", volume.Pool))
- if err != nil {
- return nil, err
- }
- if err := mutex.Lock(context.TODO()); err != nil {
- return nil, err
- }
- defer mutex.Unlock(context.TODO())
-
- pools, err := poolRegistry.Pools()
- if err != nil {
- return nil, err
- }
-
- var pool *registry.Pool
- for _, candidate := range pools {
- if candidate.Name == volume.Pool {
- pool = &candidate
- }
- }
-
- if pool == nil {
- return nil, fmt.Errorf("unable to find pool %s", volume.Pool)
- }
-
- allocations, err := getBricksForBuffer(pool, &volume)
- if err != nil {
- return nil, err
- }
-
- // Note that this call triggers dacd of the first allocation to provision the bricks
- // TODO: probably better to make the provision a spearate state change?
- if err := poolRegistry.allocateBricks(allocations); err != nil {
- return nil, err
- }
-
- return allocations, nil
-}
-
-func getBricksForBuffer(pool *registry.Pool, volume *registry.Volume) ([]registry.BrickAllocation, error) {
-
- availableBricks := pool.AvailableBricks
- var chosenBricks []registry.BrickInfo
-
- // pick some of the available bricks
- s := rand.NewSource(time.Now().Unix())
- r := rand.New(s) // initialize local pseudorandom generator
-
- randomWalk := r.Perm(len(availableBricks))
- for _, i := range randomWalk {
- candidateBrick := availableBricks[i]
-
- // TODO: should not the random walk mean this isn't needed!
- goodCandidate := true
- for _, brick := range chosenBricks {
- if brick == candidateBrick {
- goodCandidate = false
- break
- }
- }
- if goodCandidate {
- chosenBricks = append(chosenBricks, candidateBrick)
- }
- if uint(len(chosenBricks)) >= volume.SizeBricks {
- break
- }
- }
-
- if uint(len(chosenBricks)) != volume.SizeBricks {
- return nil, fmt.Errorf(
- "unable to get number of requested bricks (%d) for given pool (%s)",
- volume.SizeBricks, pool.Name)
- }
-
- var allocations []registry.BrickAllocation
- for _, brick := range chosenBricks {
- allocations = append(allocations, registry.BrickAllocation{
- Device: brick.Device,
- Hostname: brick.Hostname,
- AllocatedVolume: volume.Name,
- DeallocateRequested: false,
- })
- }
- return allocations, nil
-}
-
-func (poolRegistry *poolRegistry) allocateBricks(allocations []registry.BrickAllocation) error {
- var bricks []registry.BrickInfo
- var volume registry.VolumeName
- var raw []KeyValue
- for i, allocation := range allocations {
- brick, err := poolRegistry.GetBrickInfo(allocation.Hostname, allocation.Device)
- if err != nil {
- return fmt.Errorf("unable to find brick for: %+v", allocation)
- }
- bricks = append(bricks, brick)
-
- if allocation.DeallocateRequested {
- return fmt.Errorf("should not requeste deallocated: %+v", allocation)
- }
- if allocation.AllocatedIndex != 0 {
- return fmt.Errorf("should not specify the allocated index")
- }
- if volume == "" {
- volume = allocation.AllocatedVolume
- }
- if volume != allocation.AllocatedVolume {
- return fmt.Errorf("all allocations must be for same volume")
- }
- // TODO: this error checking suggest we specify the wrong format here!
-
- allocation.AllocatedIndex = uint(i)
- raw = append(raw, KeyValue{
- Key: getBrickAllocationKeyHost(allocation),
- Value: toJson(allocation),
- })
- // TODO: maybe just point to the other key?? this duplication is terrible
- raw = append(raw, KeyValue{
- Key: getBrickAllocationKeyVolume(allocation),
- Value: toJson(allocation),
- })
- }
- return poolRegistry.keystore.Add(raw)
-}
-
-func (poolRegistry *poolRegistry) deallocate(raw []KeyValueVersion,
- updated []KeyValueVersion) ([]KeyValueVersion, []string) {
- var keys []string
- for _, entry := range raw {
- rawValue := entry.Value
- var allocation registry.BrickAllocation
- json.Unmarshal(bytes.NewBufferString(rawValue).Bytes(), &allocation)
-
- allocation.DeallocateRequested = true
- entry.Value = toJson(&allocation)
- updated = append(updated, entry)
- keys = append(keys, getBrickAllocationKeyHost(allocation))
- }
- return updated, keys
-}
-
-func (poolRegistry *poolRegistry) DeallocateBricks(volume registry.VolumeName) error {
- var updated []KeyValueVersion
-
- volPrefix := getPrefixAllocationVolume(volume)
- raw, err := poolRegistry.keystore.GetAll(volPrefix)
- if err != nil {
- return nil
- }
- updated, keys := poolRegistry.deallocate(raw, updated)
-
- raw = []KeyValueVersion{}
- for _, key := range keys {
- entry, err := poolRegistry.keystore.Get(key)
- if err != nil {
- return err
- }
- raw = append(raw, entry)
- }
- updated, _ = poolRegistry.deallocate(raw, updated)
- return poolRegistry.keystore.Update(updated)
-}
-
-func (poolRegistry *poolRegistry) HardDeleteAllocations(allocations []registry.BrickAllocation) error {
- var keys []string
- for _, allocation := range allocations {
- keys = append(keys, getBrickAllocationKeyHost(allocation))
- keys = append(keys, getBrickAllocationKeyVolume(allocation))
- if !allocation.DeallocateRequested {
- return fmt.Errorf("must first call deallocate on: %+v", allocation)
- }
- }
-
- var keyValues []KeyValueVersion
- for _, key := range keys {
- keyValue, err := poolRegistry.keystore.Get(key)
- if err != nil {
- return err
- }
- // TODO check we have already called deallocate properly
- keyValues = append(keyValues, keyValue)
- }
- return poolRegistry.keystore.DeleteAll(keyValues)
-}
-
-func (poolRegistry *poolRegistry) getAllocations(prefix string) ([]registry.BrickAllocation, error) {
- raw, err := poolRegistry.keystore.GetAll(prefix)
- if err != nil {
- return nil, err
- }
- var allocations []registry.BrickAllocation
- for _, entry := range raw {
- rawValue := entry.Value
- var allocation registry.BrickAllocation
- json.Unmarshal(bytes.NewBufferString(rawValue).Bytes(), &allocation)
- allocations = append(allocations, allocation)
- }
- return allocations, nil
-}
-
-func (poolRegistry *poolRegistry) GetAllocationsForHost(hostname string) ([]registry.BrickAllocation, error) {
- return poolRegistry.getAllocations(getPrefixAllocationHost(hostname))
-}
-
-func (poolRegistry *poolRegistry) GetAllocationsForVolume(volume registry.VolumeName) ([]registry.BrickAllocation, error) {
- return poolRegistry.getAllocations(getPrefixAllocationVolume(volume))
-}
-
-func (poolRegistry *poolRegistry) GetBrickInfo(hostname string, device string) (registry.BrickInfo, error) {
- raw, error := poolRegistry.keystore.Get(getBrickInfoKey(hostname, device))
- var value registry.BrickInfo
- json.Unmarshal(bytes.NewBufferString(raw.Value).Bytes(), &value)
- return value, error
-}
-
-func (poolRegistry *poolRegistry) GetNewHostBrickAllocations(
- ctxt context.Context, hostname string) <-chan registry.BrickAllocation {
-
- events := make(chan registry.BrickAllocation)
-
- key := getPrefixAllocationHost(hostname)
- rawEvents := poolRegistry.keystore.Watch(ctxt, key, true)
-
- go func() {
- defer close(events)
- if rawEvents == nil {
- return
- }
- for raw := range rawEvents {
- if raw.Err != nil {
- // consider sending error back to the listener? For now force process restart
- log.Panicf("Error when watching %s for new brick hosts: %+v", hostname, raw)
- }
- if raw.IsCreate {
- newBrick := registry.BrickAllocation{}
- err := json.Unmarshal(bytes.NewBufferString(raw.New.Value).Bytes(), &newBrick)
- if err != nil {
- log.Panicf("error parsing create brick host %s event %+v %s", hostname, raw, err)
- } else {
- events <- newBrick
- }
- }
- }
- // we get here if the context is canceled, etc
- }()
-
- return events
-}
-
-func (poolRegistry *poolRegistry) getBricks(prefix string) ([]registry.BrickInfo, error) {
- raw, err := poolRegistry.keystore.GetAll(prefix)
- if err != nil {
- return nil, err
- }
- var allocations []registry.BrickInfo
- for _, entry := range raw {
- rawValue := entry.Value
- var allocation registry.BrickInfo
- json.Unmarshal(bytes.NewBufferString(rawValue).Bytes(), &allocation)
- allocations = append(allocations, allocation)
- }
- return allocations, nil
-}
-
-func (poolRegistry *poolRegistry) Pools() ([]registry.Pool, error) {
- allBricks, _ := poolRegistry.getBricks(registeredBricksPrefix)
- allAllocations, _ := poolRegistry.getAllocations(allocatedBricksPrefix)
-
- allocationLookup := make(map[string]registry.BrickAllocation)
- for _, allocation := range allAllocations {
- key := fmt.Sprintf("%s/%s", allocation.Hostname, allocation.Device)
- allocationLookup[key] = allocation
- }
-
- pools := make(map[string]*registry.Pool)
- hosts := make(map[string]*registry.HostInfo)
- for _, brick := range allBricks {
- pool, ok := pools[brick.PoolName]
- if !ok {
- pool = ®istry.Pool{
- Name: brick.PoolName,
- GranularityGB: brick.CapacityGB,
- AllocatedBricks: []registry.BrickAllocation{},
- AvailableBricks: []registry.BrickInfo{},
- Hosts: make(map[string]registry.HostInfo),
- }
- }
-
- if brick.CapacityGB != pool.GranularityGB {
- log.Printf("brick doesn't match pool granularity: %+v\n", brick)
- if brick.CapacityGB < pool.GranularityGB {
- pool.GranularityGB = brick.CapacityGB
- }
- }
-
- host, ok := hosts[brick.Hostname]
- if !ok {
- hostAlive, _ := poolRegistry.HostAlive(brick.Hostname)
- host = ®istry.HostInfo{
- Hostname: brick.Hostname,
- Alive: hostAlive,
- }
- }
-
- if _, ok := pool.Hosts[brick.Hostname]; !ok {
- pool.Hosts[brick.Hostname] = *host
- }
-
- key := fmt.Sprintf("%s/%s", brick.Hostname, brick.Device)
- allocation, ok := allocationLookup[key]
- if ok {
- pool.AllocatedBricks = append(pool.AllocatedBricks, allocation)
- hosts[brick.Hostname] = host
- pools[brick.PoolName] = pool
- } else {
- if host.Alive {
- pool.AvailableBricks = append(pool.AvailableBricks, brick)
- hosts[brick.Hostname] = host
- pools[brick.PoolName] = pool
- }
- }
- }
-
- var poolList []registry.Pool
- for _, value := range pools {
- poolList = append(poolList, *value)
- }
- return poolList, nil
-}
diff --git a/internal/pkg/keystoreregistry/pool_test.go b/internal/pkg/keystoreregistry/pool_test.go
deleted file mode 100644
index 7a71de58..00000000
--- a/internal/pkg/keystoreregistry/pool_test.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package keystoreregistry
-
-import (
- "context"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "github.com/stretchr/testify/assert"
- "testing"
-)
-
-type fakeKeystore struct {
- watchChan KeyValueUpdateChan
- t *testing.T
- key string
- withPrefix bool
-}
-
-func (fakeKeystore) Close() error {
- panic("implement me")
-}
-func (fakeKeystore) CleanPrefix(prefix string) error {
- panic("implement me")
-}
-func (fakeKeystore) Add(keyValues []KeyValue) error {
- panic("implement me")
-}
-func (fakeKeystore) Update(keyValues []KeyValueVersion) error {
- panic("implement me")
-}
-func (fakeKeystore) DeleteAll(keyValues []KeyValueVersion) error {
- panic("implement me")
-}
-func (fakeKeystore) GetAll(prefix string) ([]KeyValueVersion, error) {
- panic("implement me")
-}
-func (fakeKeystore) Get(key string) (KeyValueVersion, error) {
- panic("implement me")
-}
-func (fakeKeystore) WatchPrefix(prefix string, onUpdate func(old *KeyValueVersion, new *KeyValueVersion)) {
- panic("implement me")
-}
-func (fakeKeystore) WatchKey(ctxt context.Context, key string, onUpdate func(old *KeyValueVersion, new *KeyValueVersion)) {
- panic("implement me")
-}
-func (fk fakeKeystore) Watch(ctxt context.Context, key string, withPrefix bool) KeyValueUpdateChan {
- assert.Equal(fk.t, fk.key, key)
- assert.Equal(fk.t, fk.withPrefix, withPrefix)
- return fk.watchChan
-}
-func (fakeKeystore) KeepAliveKey(key string) error {
- panic("implement me")
-}
-func (fakeKeystore) NewMutex(lockKey string) (Mutex, error) {
- panic("implement me")
-}
-
-func TestPoolRegistry_GetNewHostBrickAllocations(t *testing.T) {
- rawEvents := make(chan KeyValueUpdate)
- reg := poolRegistry{keystore: &fakeKeystore{
- watchChan: rawEvents, t: t, key: "/bricks/allocated/host/host1/", withPrefix: true,
- }}
-
- events := reg.GetNewHostBrickAllocations(context.TODO(), "host1")
-
- go func() {
- rawEvents <- KeyValueUpdate{IsCreate: false}
- rawEvents <- KeyValueUpdate{
- IsCreate: true,
- New: &KeyValueVersion{Value: toJson(registry.BrickAllocation{
- Hostname: "host1", Device: "sdb",
- })},
- }
- rawEvents <- KeyValueUpdate{IsCreate: false}
- close(rawEvents)
- }()
-
- ev1 := <-events
- assert.Equal(t, "host1", ev1.Hostname)
- assert.Equal(t, "sdb", ev1.Device)
-
- _, ok := <-events
- assert.False(t, ok)
- _, ok = <-rawEvents
- assert.False(t, ok)
-}
-
-func TestPoolRegistry_GetNewHostBrickAllocations_nil(t *testing.T) {
- reg := poolRegistry{keystore: &fakeKeystore{
- watchChan: nil, t: t, key: "/bricks/allocated/host/host2/", withPrefix: true,
- }}
-
- events := reg.GetNewHostBrickAllocations(context.TODO(), "host2")
-
- _, ok := <-events
- assert.False(t, ok)
-}
diff --git a/internal/pkg/keystoreregistry/volume.go b/internal/pkg/keystoreregistry/volume.go
deleted file mode 100644
index 82ca4177..00000000
--- a/internal/pkg/keystoreregistry/volume.go
+++ /dev/null
@@ -1,405 +0,0 @@
-package keystoreregistry
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "log"
- "math/rand"
- "time"
-)
-
-func NewVolumeRegistry(keystore Keystore) registry.VolumeRegistry {
- return &volumeRegistry{keystore}
-}
-
-type volumeRegistry struct {
- keystore Keystore
-}
-
-func (volRegistry *volumeRegistry) AllVolumes() ([]registry.Volume, error) {
- var volumes []registry.Volume
- keyValues, err := volRegistry.keystore.GetAll(volumeKeyPrefix)
- if err != nil {
- return volumes, err
- }
- for _, keyValue := range keyValues {
- var volume registry.Volume
- err = volumeFromKeyValue(keyValue, &volume)
- if err != nil {
- return volumes, nil
- }
- volumes = append(volumes, volume)
- }
- return volumes, nil
-}
-
-func (volRegistry *volumeRegistry) Jobs() ([]registry.Job, error) {
- var jobs []registry.Job
- keyValues, err := volRegistry.keystore.GetAll(jobPrefix)
- for _, keyValue := range keyValues {
- var job registry.Job
- err := json.Unmarshal(bytes.NewBufferString(keyValue.Value).Bytes(), &job)
- if err != nil {
- return jobs, err
- }
- jobs = append(jobs, job)
- }
- return jobs, err
-}
-
-const jobPrefix = "/job/"
-
-func getJobKey(jobName string) string {
- return fmt.Sprintf("%s%s/", jobPrefix, jobName)
-}
-
-func (volRegistry *volumeRegistry) Job(jobName string) (registry.Job, error) {
- var job registry.Job // TODO return a pointer instead?
- keyValue, err := volRegistry.keystore.Get(getJobKey(jobName))
- if err != nil {
- return job, err
- }
- err = json.Unmarshal(bytes.NewBufferString(keyValue.Value).Bytes(), &job)
- if err != nil {
- return job, err
- }
- return job, nil
-}
-
-func (volRegistry *volumeRegistry) AddJob(job registry.Job) error {
- for _, volumeName := range job.MultiJobVolumes {
- volume, err := volRegistry.Volume(volumeName)
- if err != nil {
- return err
- }
- // TODO: what other checks are required?
- if volume.State < registry.Registered {
- return fmt.Errorf("must register volume: %s", volume.Name)
- }
- }
- if job.JobVolume != "" {
- volume, err := volRegistry.Volume(job.JobVolume)
- if err != nil {
- return err
- }
- // TODO: what other checks are required?
- if volume.State < registry.Registered {
- return fmt.Errorf("must register volume: %s", volume.Name)
- }
- }
- return volRegistry.keystore.Add([]KeyValue{
- {Key: getJobKey(job.Name), Value: toJson(job)},
- })
-}
-
-func (volRegistry *volumeRegistry) DeleteJob(jobName string) error {
- keyValue, err := volRegistry.keystore.Get(getJobKey(jobName))
- if err != nil {
- return err
- }
- return volRegistry.keystore.DeleteAll([]KeyValueVersion{keyValue})
-}
-
-func (volRegistry *volumeRegistry) JobAttachHosts(jobName string, hosts []string) error {
- keyValue, err := volRegistry.keystore.Get(getJobKey(jobName))
- if err != nil {
- return err
- }
- var job registry.Job
- err = json.Unmarshal(bytes.NewBufferString(keyValue.Value).Bytes(), &job)
- if err != nil {
- return err
- }
-
- // TODO validate hostnames?
- job.AttachHosts = hosts
- keyValue.Value = toJson(job)
-
- return volRegistry.keystore.Update([]KeyValueVersion{keyValue})
-}
-
-func findAttachment(attachments []registry.Attachment,
- hostname string, jobName string) (*registry.Attachment, bool) {
- for _, candidate := range attachments {
- if candidate.Hostname == hostname && candidate.Job == jobName {
- // TODO: double check for duplicate match?
- return &candidate, true
- }
- }
- return nil, false
-}
-
-func mergeAttachments(oldAttachments []registry.Attachment, updates []registry.Attachment) []registry.Attachment {
- var newAttachments []registry.Attachment
- for _, update := range updates {
- newAttachments = append(newAttachments, update)
- }
-
- // add any existing attachments that don't match an update
- for _, oldAttachment := range oldAttachments {
- _, ok := findAttachment(
- updates, oldAttachment.Hostname, oldAttachment.Job)
- if !ok {
- newAttachments = append(newAttachments, oldAttachment)
- }
- }
- return newAttachments
-}
-
-func (volRegistry *volumeRegistry) UpdateVolumeAttachments(name registry.VolumeName,
- updates []registry.Attachment) error {
- update := func(volume *registry.Volume) error {
- volume.Attachments = mergeAttachments(volume.Attachments, updates)
- return nil
- }
- return volRegistry.updateVolume(name, update)
-}
-
-func (volRegistry *volumeRegistry) DeleteVolumeAttachments(name registry.VolumeName, hostnames []string, jobName string) error {
-
- update := func(volume *registry.Volume) error {
- if volume.Attachments == nil {
- return errors.New("no attachments to delete")
- } else {
- numberRemoved := removeAttachments(volume, jobName, hostnames)
- if numberRemoved != len(hostnames) {
- return fmt.Errorf("unable to find all attachments for volume %s", name)
- }
- }
- return nil
- }
- return volRegistry.updateVolume(name, update)
-}
-
-func removeAttachments(volume *registry.Volume, jobName string, hostnames []string) int {
- var newAttachments []registry.Attachment
- for _, attachment := range volume.Attachments {
- remove := false
- if attachment.Job == jobName {
- for _, host := range hostnames {
- if attachment.Hostname == host {
- remove = true
- break
- }
- }
- }
- if !remove {
- newAttachments = append(newAttachments, attachment)
- }
- }
- numberRemoved := len(volume.Attachments) - len(newAttachments)
- volume.Attachments = newAttachments
- return numberRemoved
-}
-
-func (volRegistry *volumeRegistry) updateVolume(name registry.VolumeName,
- update func(volume *registry.Volume) error) error {
-
- // TODO: if we restructure attachments into separate keys, we can probably ditch this mutex
- mutex, err := volRegistry.keystore.NewMutex(getVolumeKey(string(name)))
- if err != nil {
- return err
- }
- if err := mutex.Lock(context.TODO()); err != nil {
- return err
- }
- defer mutex.Unlock(context.TODO())
-
- keyValue, err := volRegistry.keystore.Get(getVolumeKey(string(name)))
- if err != nil {
- return err
- }
-
- volume := registry.Volume{}
- err = volumeFromKeyValue(keyValue, &volume)
- if err != nil {
- return nil
- }
- if err := update(&volume); err != nil {
- return err
- }
-
- keyValue.Value = toJson(volume)
- return volRegistry.keystore.Update([]KeyValueVersion{keyValue})
-}
-
-func (volRegistry *volumeRegistry) VolumeOperationMutex(name registry.VolumeName) (registry.Mutex, error) {
- return volRegistry.keystore.NewMutex(fmt.Sprintf("operation_%s", name))
-}
-
-func (volRegistry *volumeRegistry) UpdateState(name registry.VolumeName, state registry.VolumeState) error {
- updateState := func(volume *registry.Volume) error {
- stateDifference := state - volume.State
- if stateDifference != 1 && state != registry.Error && state != registry.DeleteRequested {
- return fmt.Errorf("must update volume %s to the next state, current state: %s",
- volume.Name, volume.State)
- }
- volume.State = state
- if state == registry.BricksAllocated {
- // From this point onwards, we know bricks might need to be cleaned up
- volume.HadBricksAssigned = true
- }
- return nil
- }
- return volRegistry.updateVolume(name, updateState)
-}
-
-const volumeKeyPrefix = "/volume/"
-
-func getVolumeKey(volumeName string) string {
- return fmt.Sprintf("%s%s/", volumeKeyPrefix, volumeName)
-}
-
-func init() {
- rand.Seed(time.Now().UnixNano())
-}
-
-const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
-
-func GetNewUUID() string {
- b := make([]byte, 8)
- for i := range b {
- b[i] = letters[rand.Int63()%int64(len(letters))]
- }
- return string(b)
-}
-
-func (volRegistry *volumeRegistry) AddVolume(volume registry.Volume) error {
- // TODO: both uuid and client port might clash, need to check they don't!!
- volume.UUID = GetNewUUID()
- volume.ClientPort = rand.Intn(50000) + 10000
- return volRegistry.keystore.Add([]KeyValue{{
- Key: getVolumeKey(string(volume.Name)),
- Value: toJson(volume),
- }})
-}
-
-func volumeFromKeyValue(keyValue KeyValueVersion, volume *registry.Volume) error {
- return json.Unmarshal(bytes.NewBufferString(keyValue.Value).Bytes(), &volume)
-}
-
-func (volRegistry *volumeRegistry) Volume(name registry.VolumeName) (registry.Volume, error) {
- volume := registry.Volume{}
- keyValue, err := volRegistry.keystore.Get(getVolumeKey(string(name)))
- if err != nil {
- return volume, err
- }
- err = volumeFromKeyValue(keyValue, &volume)
- if err != nil {
- return volume, nil
- }
- return volume, nil
-}
-
-func (volRegistry *volumeRegistry) DeleteVolume(name registry.VolumeName) error {
- keyValue, err := volRegistry.keystore.Get(getVolumeKey(string(name)))
- if err != nil {
- return err
- }
- return volRegistry.keystore.DeleteAll([]KeyValueVersion{keyValue})
-}
-
-func (volRegistry *volumeRegistry) GetVolumeChanges(ctx context.Context, volume registry.Volume) registry.VolumeChangeChan {
- // TODO: we should watch from the version of the passed in volume
- key := getVolumeKey(string(volume.Name))
- rawEvents := volRegistry.keystore.Watch(ctx, key, false)
-
- events := make(chan registry.VolumeChange)
-
- go func() {
- defer close(events)
- if rawEvents == nil {
- return
- }
- for rawEvent := range rawEvents {
- if rawEvent.Err != nil {
- events <- registry.VolumeChange{Err: rawEvent.Err}
- continue
- }
-
- event := registry.VolumeChange{
- IsDelete: rawEvent.IsDelete,
- Old: nil,
- New: nil,
- }
- if rawEvent.Old != nil {
- oldVolume := ®istry.Volume{}
- if err := volumeFromKeyValue(*rawEvent.Old, oldVolume); err != nil {
- event.Err = err
- } else {
- event.Old = oldVolume
- }
- }
- if rawEvent.New != nil {
- newVolume := ®istry.Volume{}
- if err := volumeFromKeyValue(*rawEvent.New, newVolume); err != nil {
- event.Err = err
- } else {
- event.New = newVolume
- }
- }
- events <- event
- }
- }()
-
- return events
-}
-
-func (volRegistry *volumeRegistry) WaitForState(volumeName registry.VolumeName, state registry.VolumeState) error {
- log.Println("Start waiting for volume", volumeName, "to reach state", state)
- err := volRegistry.WaitForCondition(volumeName, func(event *registry.VolumeChange) bool {
- if event.New == nil {
- log.Panicf("unable to process event %+v", event)
- }
- return event.New.State == state || event.New.State == registry.Error
- })
- log.Println("Stopped waiting for volume", volumeName, "to reach state", state, err)
- if err != nil {
- return err
- }
-
- // return error if we went to an error state
- volume, err := volRegistry.Volume(volumeName)
- if err == nil && volume.State == registry.Error {
- return fmt.Errorf("stopped waiting as volume %s in error state", volumeName)
- }
- return err
-}
-
-// TODO: maybe have environment variable to tune this wait time?
-var defaultTimeout = time.Minute * 10
-
-func (volRegistry *volumeRegistry) WaitForCondition(volumeName registry.VolumeName,
- condition func(event *registry.VolumeChange) bool) error {
-
- volume, err := volRegistry.Volume(volumeName)
- if err != nil {
- return err
- }
-
- ctxt, cancelFunc := context.WithTimeout(context.Background(), defaultTimeout)
- events := volRegistry.GetVolumeChanges(ctxt, volume)
- defer cancelFunc()
-
- log.Printf("About to wait for condition on volume: %+v", volume)
-
- for event := range events {
- if event.Err != nil {
- return event.Err
- }
- if event.IsDelete {
- return fmt.Errorf("stopped waiting as volume %s is deleted", volume.Name)
- }
-
- conditionMet := condition(&event)
- if conditionMet {
- return nil
- }
- }
-
- return fmt.Errorf("stopped waiting for volume %s to meet supplied condition", volume.Name)
-}
diff --git a/internal/pkg/keystoreregistry/volume_test.go b/internal/pkg/keystoreregistry/volume_test.go
deleted file mode 100644
index 3af42647..00000000
--- a/internal/pkg/keystoreregistry/volume_test.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package keystoreregistry
-
-import (
- "context"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "github.com/stretchr/testify/assert"
- "testing"
-)
-
-func TestVolumeRegistry_DeleteVolumeAttachments(t *testing.T) {
- vol := registry.Volume{
- Attachments: []registry.Attachment{
- {Hostname: "host1", Job: "job1"},
- {Hostname: "host2", Job: "job1"},
- {Hostname: "host2", Job: "job2"},
- },
- }
- numRemoved := removeAttachments(&vol, "job1", []string{"host1", "host2"})
- assert.Equal(t, 2, numRemoved)
- assert.Equal(t, 1, len(vol.Attachments))
-}
-
-func TestVolumeRegistry_FindAttachment(t *testing.T) {
- attachments := []registry.Attachment{
- {Job: "job1", Hostname: "foo1"}, {Job: "job2", Hostname: "foo1"}, {Job: "job2", Hostname: "foo2"},
- }
-
- attachment, ok := findAttachment(nil, "", "")
- assert.Nil(t, attachment)
- assert.False(t, ok)
-
- attachment, ok = findAttachment(attachments, "foo2", "job1")
- assert.Nil(t, attachment)
- assert.False(t, ok)
-
- attachment, ok = findAttachment(attachments, "foo1", "job1")
- assert.True(t, ok)
- assert.Equal(t, registry.Attachment{Job: "job1", Hostname: "foo1"}, *attachment)
-
- attachment, ok = findAttachment(attachments, "foo1", "job2")
- assert.True(t, ok)
- assert.Equal(t, registry.Attachment{Job: "job2", Hostname: "foo1"}, *attachment)
-}
-
-func TestVolumeRegistry_MergeAttachments(t *testing.T) {
- oldAttachments := []registry.Attachment{
- {Job: "job1", Hostname: "foo1"}, {Job: "job2", Hostname: "foo1"}, {Job: "job2", Hostname: "foo2"},
- }
-
- assert.Nil(t, mergeAttachments(nil, nil))
- assert.Equal(t, oldAttachments, mergeAttachments(oldAttachments, nil))
- assert.Equal(t, oldAttachments, mergeAttachments(nil, oldAttachments))
-
- // add new
- result := mergeAttachments(oldAttachments, []registry.Attachment{{Job: "foo", Hostname: "foo"}})
- assert.Equal(t, 4, len(result))
- assert.Equal(t, registry.Attachment{Job: "foo", Hostname: "foo"}, result[0])
- assert.Equal(t, oldAttachments[0], result[1])
- assert.Equal(t, oldAttachments[1], result[2])
- assert.Equal(t, oldAttachments[2], result[3])
-
- // in place update
- updates := []registry.Attachment{
- {Job: "job2", Hostname: "foo1", State: registry.Attached},
- {Job: "job2", Hostname: "foo2", State: registry.Attached},
- }
- result = mergeAttachments(oldAttachments, updates)
- assert.Equal(t, 3, len(result))
- assert.Equal(t, updates[0], result[0])
- assert.Equal(t, updates[1], result[1])
- assert.Equal(t, oldAttachments[0], result[2])
-}
-
-func TestVolumeRegistry_GetVolumeChanges_nil(t *testing.T) {
- volReg := volumeRegistry{keystore: fakeKeystore{watchChan: nil, t: t, key: "/volume/vol1/"}}
-
- changes := volReg.GetVolumeChanges(context.TODO(), registry.Volume{Name: "vol1"})
-
- _, ok := <-changes
- assert.False(t, ok)
-}
-
-func TestVolumeRegistry_GetVolumeChanges(t *testing.T) {
- raw := make(chan KeyValueUpdate)
- volReg := volumeRegistry{keystore: fakeKeystore{
- watchChan: raw, t: t, key: "/volume/vol1/", withPrefix: false,
- }}
-
- changes := volReg.GetVolumeChanges(context.TODO(), registry.Volume{Name: "vol1"})
-
- vol := ®istry.Volume{Name: "test1"}
-
- go func() {
- raw <- KeyValueUpdate{
- New: &KeyValueVersion{Key: "asdf", Value: toJson(vol)},
- Old: &KeyValueVersion{Key: "asdf", Value: toJson(vol)},
- }
- raw <- KeyValueUpdate{
- IsDelete: true,
- New: nil,
- Old: &KeyValueVersion{Key: "asdf", Value: toJson(vol)},
- }
- raw <- KeyValueUpdate{
- New: &KeyValueVersion{Key: "asdf", Value: "asdf"},
- Old: nil,
- }
- close(raw)
- }()
-
- ch1 := <-changes
- assert.Nil(t, ch1.Err)
- assert.False(t, ch1.IsDelete)
- assert.Equal(t, vol, ch1.Old)
- assert.Equal(t, vol, ch1.New)
-
- ch2 := <-changes
- assert.Nil(t, ch2.Err)
- assert.True(t, ch2.IsDelete)
- assert.Nil(t, ch2.New)
- assert.Equal(t, vol, ch1.Old)
-
- ch3 := <-changes
- assert.Equal(t, "invalid character 'a' looking for beginning of value", ch3.Err.Error())
- assert.False(t, ch3.IsDelete)
- assert.Nil(t, ch3.Old)
- assert.Nil(t, ch3.New)
-
- _, ok := <-changes
- assert.False(t, ok)
- _, ok = <-raw
- assert.False(t, ok)
-}
diff --git a/internal/pkg/lifecycle/brickmanager/bricks.go b/internal/pkg/lifecycle/brickmanager/bricks.go
deleted file mode 100644
index 18a41c7f..00000000
--- a/internal/pkg/lifecycle/brickmanager/bricks.go
+++ /dev/null
@@ -1,313 +0,0 @@
-package brickmanager
-
-import (
- "context"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/pfsprovider/ansible"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "log"
- "strings"
-)
-
-func setupBrickEventHandlers(poolRegistry registry.PoolRegistry, volumeRegistry registry.VolumeRegistry,
- hostname string) {
-
- newBricks := poolRegistry.GetNewHostBrickAllocations(context.Background(), hostname)
- go func() {
- for brick := range newBricks {
- if brick.AllocatedIndex == 0 {
- log.Printf("found new primary brick %+v", brick)
- go processNewPrimaryBlock(poolRegistry, volumeRegistry, &brick)
- } else {
- log.Printf("ignore brick create, as it is not a primary brick %+v", brick)
- }
- }
- log.Panic("we appear to have stopped watching for new bricks")
- }()
-
- allocations, err := poolRegistry.GetAllocationsForHost(hostname)
- if err != nil {
- if !strings.Contains(err.Error(), "unable to find any values") {
- log.Panic(err)
- }
- }
-
- for _, allocation := range allocations {
- volume, err := volumeRegistry.Volume(allocation.AllocatedVolume)
- if err != nil {
- log.Panicf("unable to find volume for allocation %+v", allocation)
- }
- if allocation.AllocatedIndex == 0 {
- log.Printf("Start watching again, as we host a primary brick for: %+v", volume)
- // TODO: do we finish watching correctly?
- watchForVolumeChanges(poolRegistry, volumeRegistry, volume)
-
- // TODO: trigger events if we missed the "edge" already
- if volume.State == registry.DeleteRequested {
- log.Println("Complete pending delete request for volume:", volume.Name)
- processDelete(poolRegistry, volumeRegistry, volume)
- }
- }
- }
-
- // TODO what about catching up with changes while we were down, make sure system in correct state!!
-}
-
-func processNewPrimaryBlock(poolRegistry registry.PoolRegistry, volumeRegistry registry.VolumeRegistry,
- new *registry.BrickAllocation) {
- volume, err := volumeRegistry.Volume(new.AllocatedVolume)
- if err != nil {
- log.Printf("Could not file volume: %s because: %s\n", new.AllocatedVolume, err)
- return
- }
- log.Println("Found new volume to watch:", volume.Name)
- log.Println(volume)
-
- watchForVolumeChanges(poolRegistry, volumeRegistry, volume)
-
- // Move to new state, ignored by above watch
- provisionNewVolume(poolRegistry, volumeRegistry, volume)
-}
-
-func watchForVolumeChanges(poolRegistry registry.PoolRegistry, volumeRegistry registry.VolumeRegistry,
- volume registry.Volume) {
-
- ctxt, cancelFunc := context.WithCancel(context.Background())
- changes := volumeRegistry.GetVolumeChanges(ctxt, volume)
-
- go func() {
- defer cancelFunc()
-
- for change := range changes {
- old := change.Old
- new := change.New
-
- if change.IsDelete {
- log.Printf("Stop watching volume: %s", volume.Name)
- return
- }
-
- if old == nil || new == nil {
- log.Printf("nil volume seen, unable to process volume event: %+v", change)
- }
-
- if change.Err != nil {
- log.Printf("Error while waiting for volume %s saw error: %s with: %+v",
- volume.Name, change.Err.Error(), change)
- }
-
- if new.State != old.State {
- log.Printf("volume:%s state move: %s -> %s", new.Name, old.State, new.State)
- switch new.State {
- case registry.DataInRequested:
- processDataIn(volumeRegistry, *new)
- case registry.DataOutRequested:
- processDataOut(volumeRegistry, *new)
- case registry.DeleteRequested:
- processDelete(poolRegistry, volumeRegistry, *new)
- case registry.BricksDeleted:
- log.Println("Volume", new.Name, "has had bricks deleted.")
- default:
- // Ignore the state changes we triggered
- log.Printf("ignore volume state move %+v", change)
- }
- }
-
- if len(new.Attachments) > len(old.Attachments) {
- var attachRequested []registry.Attachment
- for _, newAttachment := range new.Attachments {
- isNew := false
- if old.Attachments == nil {
- isNew = true
- } else {
- _, ok := old.FindMatchingAttachment(newAttachment)
- isNew = !ok
- }
- if isNew && newAttachment.State == registry.RequestAttach {
- attachRequested = append(attachRequested, newAttachment)
- }
- }
- if attachRequested != nil && len(attachRequested) > 0 {
- processAttach(poolRegistry, volumeRegistry, *new, attachRequested)
- }
- }
-
- if len(new.Attachments) == len(old.Attachments) && new.Attachments != nil && old.Attachments != nil {
- var detachRequested []registry.Attachment
- for _, newAttachment := range new.Attachments {
- oldAttachment, ok := old.FindMatchingAttachment(newAttachment)
- if ok && newAttachment.State == registry.RequestDetach && oldAttachment.State == registry.Attached {
- detachRequested = append(detachRequested, newAttachment)
- }
- }
- if len(detachRequested) > 0 {
- processDetach(poolRegistry, volumeRegistry, *new, detachRequested)
- }
- }
- }
- }()
-}
-
-func handleError(volumeRegistry registry.VolumeRegistry, volume registry.Volume, err error) {
- if err != nil {
- log.Println("Error provisioning", volume.Name, err)
- err = volumeRegistry.UpdateState(volume.Name, registry.Error) // TODO record an error string?
- if err != nil {
- log.Println("Unable to move volume", volume.Name, "to Error state")
- }
- }
-}
-
-// TODO: should not be hardcoded here
-var FSType = ansible.Lustre
-var plugin = ansible.GetPlugin(FSType)
-
-func provisionNewVolume(poolRegistry registry.PoolRegistry, volumeRegistry registry.VolumeRegistry, volume registry.Volume) {
- // TODO: error handling!!
- mutex, err := volumeRegistry.VolumeOperationMutex(volume.Name)
- mutex.Lock(context.TODO())
- defer mutex.Unlock(context.TODO())
- // Note: this blocks all delete requests till we are finished, which stops nasty races in ansible
-
- // TODO: fetch fresh copy of volume now we have aquired the lock? Ensure no delete has been processed already
- if volume.State != registry.Registered {
- log.Println("Volume in bad initial state:", volume.Name)
- return
- }
-
- bricks, err := poolRegistry.GetAllocationsForVolume(volume.Name)
- if err != nil {
- handleError(volumeRegistry, volume, err)
- return
- }
-
- // Before we provision the bricks, notify that we have seen the volume
- err = volumeRegistry.UpdateState(volume.Name, registry.BricksAllocated)
- if err != nil {
- handleError(volumeRegistry, volume, err)
- return
- }
-
- err = plugin.VolumeProvider().SetupVolume(volume, bricks)
- if err != nil {
- handleError(volumeRegistry, volume, err)
- return
- }
-
- err = volumeRegistry.UpdateState(volume.Name, registry.BricksProvisioned)
- handleError(volumeRegistry, volume, err)
-}
-
-func processDataIn(volumeRegistry registry.VolumeRegistry, volume registry.Volume) {
- // TODO: error handling!!
- mutex, err := volumeRegistry.VolumeOperationMutex(volume.Name)
- mutex.Lock(context.TODO())
- defer mutex.Unlock(context.TODO())
-
- // TODO: check volume is not deleted already, etc.
-
- err = plugin.VolumeProvider().CopyDataIn(volume)
- if err != nil {
- handleError(volumeRegistry, volume, err)
- return
- }
-
- err = volumeRegistry.UpdateState(volume.Name, registry.DataInComplete)
- handleError(volumeRegistry, volume, err)
-}
-
-func processAttach(poolRegistry registry.PoolRegistry, volumeRegistry registry.VolumeRegistry, volume registry.Volume,
- attachments []registry.Attachment) {
- // TODO: error handling!!
- mutex, err := volumeRegistry.VolumeOperationMutex(volume.Name)
- mutex.Lock(context.TODO())
- defer mutex.Unlock(context.TODO())
-
- bricks, err := poolRegistry.GetAllocationsForVolume(volume.Name)
- if err != nil {
- handleError(volumeRegistry, volume, err)
- return
- }
- err = plugin.Mounter().Mount(volume, bricks, attachments) // TODO pass down specific attachments?
- if err != nil {
- handleError(volumeRegistry, volume, err)
- return
- }
-
- // TODO: this is really odd, mount should probably do this?
- updates := []registry.Attachment{}
- for _, attachment := range attachments {
- if attachment.State == registry.RequestAttach {
- attachment.State = registry.Attached
- updates = append(updates, attachment)
- }
-
- }
- // TODO: what can we do if we hit an error here?
- volumeRegistry.UpdateVolumeAttachments(volume.Name, updates)
-}
-
-func processDetach(poolRegistry registry.PoolRegistry, volumeRegistry registry.VolumeRegistry, volume registry.Volume,
- attachments []registry.Attachment) {
-
- // TODO: error handling!!
- mutex, err := volumeRegistry.VolumeOperationMutex(volume.Name)
- mutex.Lock(context.TODO())
- defer mutex.Unlock(context.TODO())
-
- bricks, err := poolRegistry.GetAllocationsForVolume(volume.Name)
- if err != nil {
- handleError(volumeRegistry, volume, err)
- return
- }
-
- err = plugin.Mounter().Unmount(volume, bricks, attachments) // TODO pass down specific attachments?
- if err != nil {
- // TODO: update specific attachment into an error state?
- handleError(volumeRegistry, volume, err)
- }
-
- var updates []registry.Attachment
- for _, attachment := range attachments {
- if attachment.State == registry.RequestDetach {
- attachment.State = registry.Detached
- updates = append(updates, attachment)
- }
- }
- volumeRegistry.UpdateVolumeAttachments(volume.Name, updates)
-}
-
-func processDataOut(volumeRegistry registry.VolumeRegistry, volume registry.Volume) {
- // TODO: error handling!!
- mutex, err := volumeRegistry.VolumeOperationMutex(volume.Name)
- mutex.Lock(context.TODO())
- defer mutex.Unlock(context.TODO())
-
- err = plugin.VolumeProvider().CopyDataOut(volume)
- if err != nil {
- handleError(volumeRegistry, volume, err)
- }
-
- err = volumeRegistry.UpdateState(volume.Name, registry.DataOutComplete)
- handleError(volumeRegistry, volume, err)
-}
-
-func processDelete(poolRegistry registry.PoolRegistry, volumeRegistry registry.VolumeRegistry, volume registry.Volume) {
- // TODO: error handling!!
- mutex, err := volumeRegistry.VolumeOperationMutex(volume.Name)
- mutex.Lock(context.TODO())
- defer mutex.Unlock(context.TODO())
-
- bricks, err := poolRegistry.GetAllocationsForVolume(volume.Name)
- if err != nil {
- handleError(volumeRegistry, volume, err)
- return
- }
-
- err = plugin.VolumeProvider().TeardownVolume(volume, bricks)
- if err != nil {
- handleError(volumeRegistry, volume, err)
- }
-
- err = volumeRegistry.UpdateState(volume.Name, registry.BricksDeleted)
- handleError(volumeRegistry, volume, err)
-}
diff --git a/internal/pkg/lifecycle/brickmanager/manager.go b/internal/pkg/lifecycle/brickmanager/manager.go
deleted file mode 100644
index 679adfb3..00000000
--- a/internal/pkg/lifecycle/brickmanager/manager.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package brickmanager
-
-import (
- "fmt"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "log"
- "os"
- "strconv"
- "strings"
-)
-
-type BrickManager interface {
- Start() error
- Hostname() string
-}
-
-func NewBrickManager(poolRegistry registry.PoolRegistry, volumeRegistry registry.VolumeRegistry) BrickManager {
- return &brickManager{poolRegistry, volumeRegistry, getHostname()}
-}
-
-func getHostname() string {
- // TODO: make this configurable?
- hostname, err := os.Hostname()
- if err != nil {
- log.Fatal(err)
- }
- return hostname
-}
-
-type brickManager struct {
- poolRegistry registry.PoolRegistry
- volumeRegistry registry.VolumeRegistry
- hostname string
-}
-
-func (bm *brickManager) Hostname() string {
- return bm.hostname
-}
-
-func (bm *brickManager) Start() error {
- updateBricks(bm.poolRegistry, bm.hostname)
-
- // TODO, on startup see what existing allocations there are, and watch those volumes
- setupBrickEventHandlers(bm.poolRegistry, bm.volumeRegistry, bm.hostname)
-
- // Do this after registering all bricks and their handlers, and any required tidy up
- notifyStarted(bm.poolRegistry, bm.hostname)
-
- // Check after the processes have started up
- outputDebugLogs(bm.poolRegistry, bm.hostname)
-
- return nil
-}
-
-const DefaultDeviceAddress = "nvme%dn1"
-const DefaultDeviceCapacityGB = 1400
-const DefaultPoolName = "default"
-
-func getDevices(devicesStr string, devType string) []string {
- // TODO: check for real devices!
- count, err := strconv.Atoi(devicesStr)
- if err != nil {
- count = 12
- }
-
- if devType == "" || !strings.Contains(devType, "%d") {
- devType = DefaultDeviceAddress
- }
-
- var bricks []string
- for i := 0; i < count; i++ {
- device := fmt.Sprintf(devType, i)
- bricks = append(bricks, device)
- }
- return bricks
-}
-
-func getBricks(devices []string, hostname string, capacityStr string, poolName string) []registry.BrickInfo {
- capacity, err := strconv.Atoi(capacityStr)
- if err != nil || capacityStr == "" || capacity <= 0 {
- capacity = DefaultDeviceCapacityGB
- }
- if poolName == "" {
- poolName = DefaultPoolName
- }
-
- var bricks []registry.BrickInfo
- for _, device := range devices {
- bricks = append(bricks, registry.BrickInfo{
- Device: device,
- Hostname: hostname,
- CapacityGB: uint(capacity),
- PoolName: poolName,
- })
- }
- return bricks
-}
-
-func updateBricks(poolRegistry registry.PoolRegistry, hostname string) {
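- // Brick inventory is driven by environment variables; missing or invalid
- // values fall back to the defaults defined above.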
- devicesStr := os.Getenv("DEVICE_COUNT")
- devType := os.Getenv("DEVICE_TYPE")
- devices := getDevices(devicesStr, devType)
-
- capacityStr := os.Getenv("DAC_DEVICE_CAPACITY_GB")
- poolName := os.Getenv("DAC_POOL_NAME")
- bricks := getBricks(devices, hostname, capacityStr, poolName)
-
- err := poolRegistry.UpdateHost(bricks)
- if err != nil {
- log.Fatalln(err)
- }
-}
-
-func outputDebugLogs(poolRegistry registry.PoolRegistry, hostname string) {
- allocations, err := poolRegistry.GetAllocationsForHost(hostname)
- if err != nil {
- // Ignore errors; we may not have any results when there are no allocations
- // TODO: maybe stop returning an error for the empty case?
- log.Println(err)
- }
- log.Println("Current allocations:", allocations)
-
- pools, err := poolRegistry.Pools()
- if err != nil {
- log.Fatalln(err)
- }
- log.Println("Current pools:", pools)
-}
-
-func notifyStarted(poolRegistry registry.PoolRegistry, hostname string) {
- err := poolRegistry.KeepAliveHost(hostname)
- if err != nil {
- log.Fatalln(err)
- }
-}
diff --git a/internal/pkg/lifecycle/brickmanager/manager_test.go b/internal/pkg/lifecycle/brickmanager/manager_test.go
deleted file mode 100644
index b16a8a75..00000000
--- a/internal/pkg/lifecycle/brickmanager/manager_test.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package brickmanager
-
-import (
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "github.com/stretchr/testify/assert"
- "testing"
-)
-
-func TestGetDevices(t *testing.T) {
- devices := getDevices("5", "")
- assert.Equal(t, 5, len(devices))
- assert.Equal(t, "nvme0n1", devices[0])
- assert.Equal(t, "nvme4n1", devices[4])
-
- devices = getDevices("asdf", "loop%d")
- assert.Equal(t, 12, len(devices))
- assert.Equal(t, "loop0", devices[0])
- assert.Equal(t, "loop11", devices[11])
-}
-
-func TestGetBricks(t *testing.T) {
- devices := []string{"a", "b"}
- bricks := getBricks(devices, "host", "-1", "")
-
- assert.Equal(t, 2, len(bricks))
- assert.Equal(t, registry.BrickInfo{
- Device: "a", Hostname: "host", PoolName: "default", CapacityGB: 1400,
- }, bricks[0])
- assert.Equal(t, registry.BrickInfo{
- Device: "b", Hostname: "host", PoolName: "default", CapacityGB: 1400,
- }, bricks[1])
-
- bricks = getBricks(devices, "host", "20", "foo")
- assert.Equal(t, registry.BrickInfo{
- Device: "b", Hostname: "host", PoolName: "foo", CapacityGB: 20,
- }, bricks[1])
-}
diff --git a/internal/pkg/lifecycle/volume.go b/internal/pkg/lifecycle/volume.go
deleted file mode 100644
index 02ce3853..00000000
--- a/internal/pkg/lifecycle/volume.go
+++ /dev/null
@@ -1,247 +0,0 @@
-package lifecycle
-
-import (
- "fmt"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "log"
-)
-
-type VolumeLifecycleManager interface {
- ProvisionBricks() error
- DataIn() error
- Mount(hosts []string, jobName string) error
- Unmount(hosts []string, jobName string) error
- DataOut() error
- Delete() error // TODO allow context for timeout and cancel?
-}
-
-func NewVolumeLifecycleManager(volumeRegistry registry.VolumeRegistry, poolRegistry registry.PoolRegistry,
- volume registry.Volume) VolumeLifecycleManager {
- return &volumeLifecycleManager{volumeRegistry, poolRegistry, volume}
-}
-
-type volumeLifecycleManager struct {
- volumeRegistry registry.VolumeRegistry
- poolRegistry registry.PoolRegistry
- volume registry.Volume
-}
-
-func (vlm *volumeLifecycleManager) ProvisionBricks() error {
- _, err := vlm.poolRegistry.AllocateBricksForVolume(vlm.volume)
- if err != nil {
- return err
- }
-
- // if there are no bricks requested, don't wait for a provision that will never happen
- if vlm.volume.SizeBricks != 0 {
- err = vlm.volumeRegistry.WaitForState(vlm.volume.Name, registry.BricksProvisioned)
- }
- return err
-}
-
-func (vlm *volumeLifecycleManager) Delete() error {
- // TODO convert errors into volume related errors, somewhere?
- log.Println("Deleting volume:", vlm.volume.Name, vlm.volume)
-
- if vlm.volume.SizeBricks == 0 {
- log.Println("No bricks to delete, skipping request delete bricks for:", vlm.volume.Name)
- } else if vlm.volume.HadBricksAssigned == false {
- allocations, _ := vlm.poolRegistry.GetAllocationsForVolume(vlm.volume.Name)
- if len(allocations) == 0 {
- // TODO should we be holding a lock here?
- log.Println("No bricks yet assigned, skip delete bricks.")
- } else {
- return fmt.Errorf("bricks assigned but dacd hasn't noticed them yet for: %+v", vlm.volume)
- }
- } else {
- log.Printf("Requested delete of %d bricks for %s", vlm.volume.SizeBricks, vlm.volume.Name)
- err := vlm.volumeRegistry.UpdateState(vlm.volume.Name, registry.DeleteRequested)
- if err != nil {
- return err
- }
- err = vlm.volumeRegistry.WaitForState(vlm.volume.Name, registry.BricksDeleted)
- if err != nil {
- return err
- }
- log.Println("Bricks deleted by brick manager for:", vlm.volume.Name)
-
- // TODO should we error out here when one of these steps fail?
- err = vlm.poolRegistry.DeallocateBricks(vlm.volume.Name)
- if err != nil {
- return err
- }
- allocations, err := vlm.poolRegistry.GetAllocationsForVolume(vlm.volume.Name)
- if err != nil {
- return err
- }
- // TODO we should really wait for the brick manager to call this API
- err = vlm.poolRegistry.HardDeleteAllocations(allocations)
- if err != nil {
- return err
- }
- log.Println("Allocations all deleted, count:", len(allocations))
- }
-
- // TODO: what about any pending mounts that might get left behind for job?
-
- log.Println("Deleting volume record in registry for:", vlm.volume.Name)
- return vlm.volumeRegistry.DeleteVolume(vlm.volume.Name)
-}
-
-func (vlm *volumeLifecycleManager) DataIn() error {
- if vlm.volume.SizeBricks == 0 {
- log.Println("skipping datain for:", vlm.volume.Name)
- return nil
- }
-
- err := vlm.volumeRegistry.UpdateState(vlm.volume.Name, registry.DataInRequested)
- if err != nil {
- return err
- }
- return vlm.volumeRegistry.WaitForState(vlm.volume.Name, registry.DataInComplete)
-}
-
-func (vlm *volumeLifecycleManager) Mount(hosts []string, jobName string) error {
- if vlm.volume.SizeBricks == 0 {
- log.Println("skipping mount for:", vlm.volume.Name) // TODO: should never happen now?
- return nil
- }
-
- if vlm.volume.State != registry.BricksProvisioned && vlm.volume.State != registry.DataInComplete {
- return fmt.Errorf("unable to mount volume: %s in state: %s", vlm.volume.Name, vlm.volume.State)
- }
-
- var attachments []registry.Attachment
- for _, host := range hosts {
- attachments = append(attachments, registry.Attachment{
- Hostname: host, State: registry.RequestAttach, Job: jobName,
- })
- }
-
- if err := vlm.volumeRegistry.UpdateVolumeAttachments(vlm.volume.Name, attachments); err != nil {
- return err
- }
-
- // TODO: should share code with Unmount!!
- var volumeInErrorState bool
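- // The condition callback below is evaluated on every volume change event;
- // returning true stops the wait, both on success and on error bail-out.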
- err := vlm.volumeRegistry.WaitForCondition(vlm.volume.Name, func(event *registry.VolumeChange) bool {
- if event.New.State == registry.Error {
- volumeInErrorState = true
- return true
- }
- allAttached := false
- for _, host := range hosts {
-
- var isAttached bool
- for _, attachment := range event.New.Attachments {
- if attachment.Job == jobName && attachment.Hostname == host {
- if attachment.State == registry.Attached {
- isAttached = true
- } else if attachment.State == registry.AttachmentError {
- // found an error, bail out early
- volumeInErrorState = true
- return true // Return true to stop the waiting
- } else {
- isAttached = false
- }
- break
- }
- }
-
- if isAttached {
- allAttached = true
- } else {
- allAttached = false
- break
- }
- }
- return allAttached
- })
- if volumeInErrorState {
- return fmt.Errorf("unable to mount volume: %s", vlm.volume.Name)
- }
- return err
-}
-
-func (vlm *volumeLifecycleManager) Unmount(hosts []string, jobName string) error {
- if vlm.volume.SizeBricks == 0 {
- log.Println("skipping postrun for:", vlm.volume.Name) // TODO return error type and handle outside?
- return nil
- }
-
- if vlm.volume.State != registry.BricksProvisioned && vlm.volume.State != registry.DataInComplete {
- return fmt.Errorf("unable to unmount volume: %s in state: %s", vlm.volume.Name, vlm.volume.State)
- }
-
- var updates []registry.Attachment
- for _, host := range hosts {
- attachment, ok := vlm.volume.FindAttachment(host, jobName)
- if !ok {
- return fmt.Errorf(
- "can't find attachment for volume: %s host: %s job: %s",
- vlm.volume.Name, host, jobName)
- }
-
- if attachment.State != registry.Attached {
- return fmt.Errorf("attachment must be attached to do unmount for volume: %s", vlm.volume.Name)
- }
- attachment.State = registry.RequestDetach
- updates = append(updates, *attachment)
- }
- // TODO: I think we need to split attachments out of the volume object to avoid the races
- if err := vlm.volumeRegistry.UpdateVolumeAttachments(vlm.volume.Name, updates); err != nil {
- return err
- }
-
- // TODO: must share way more code and do more tests on this logic!!
- var volumeInErrorState error
- err := vlm.volumeRegistry.WaitForCondition(vlm.volume.Name, func(event *registry.VolumeChange) bool {
- if event.New.State == registry.Error {
- volumeInErrorState = fmt.Errorf("volume %s now in error state", event.New.Name)
- return true
- }
- allDetached := false
- for _, host := range hosts {
- newAttachment, ok := event.New.FindAttachment(host, jobName)
- if !ok {
- // TODO: debug log or something?
- volumeInErrorState = fmt.Errorf("unable to find attachment for host: %s", host)
- return true
- }
-
- if newAttachment.State == registry.AttachmentError {
- // found an error, bail out early
- volumeInErrorState = fmt.Errorf("attachment for host %s in error state", host)
- return true
- }
-
- if newAttachment.State == registry.Detached {
- allDetached = true
- } else {
- allDetached = false
- break
- }
- }
- return allDetached
- })
- if volumeInErrorState != nil {
- return fmt.Errorf("unable to unmount volume: %s because: %s", vlm.volume.Name, volumeInErrorState)
- }
- if err != nil {
- return err
- }
- return vlm.volumeRegistry.DeleteVolumeAttachments(vlm.volume.Name, hosts, jobName)
-}
-
-func (vlm *volumeLifecycleManager) DataOut() error {
- if vlm.volume.SizeBricks == 0 {
- log.Println("skipping data_out for:", vlm.volume.Name)
- return nil
- }
-
- err := vlm.volumeRegistry.UpdateState(vlm.volume.Name, registry.DataOutRequested)
- if err != nil {
- return err
- }
- return vlm.volumeRegistry.WaitForState(vlm.volume.Name, registry.DataOutComplete)
-}
diff --git a/internal/pkg/lifecycle/volume_test.go b/internal/pkg/lifecycle/volume_test.go
deleted file mode 100644
index fb18ce82..00000000
--- a/internal/pkg/lifecycle/volume_test.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package lifecycle
-
-import (
- "github.com/RSE-Cambridge/data-acc/internal/pkg/mocks"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "github.com/golang/mock/gomock"
- "github.com/stretchr/testify/assert"
- "testing"
-)
-
-func TestVolumeLifecycleManager_Mount(t *testing.T) {
- mockCtrl := gomock.NewController(t)
- defer mockCtrl.Finish()
- mockVolReg := mocks.NewMockVolumeRegistry(mockCtrl)
-
- volume := registry.Volume{
- Name: "vol1", SizeBricks: 3, State: registry.BricksProvisioned, JobName: "job1"}
- vlm := NewVolumeLifecycleManager(mockVolReg, nil, volume)
- hosts := []string{"host1", "host2"}
-
- mockVolReg.EXPECT().UpdateVolumeAttachments(volume.Name, []registry.Attachment{
- {Hostname: "host1", State: registry.RequestAttach, Job: "job1"},
- {Hostname: "host2", State: registry.RequestAttach, Job: "job1"},
- })
- fakeWait := func(volumeName registry.VolumeName, condition func(event *registry.VolumeChange) bool) error {
- event := &registry.VolumeChange{New: &registry.Volume{}}
- assert.False(t, condition(event))
- event.New.Attachments = []registry.Attachment{
- {Hostname: "host1", Job: "job2", State: registry.Detached},
- {Hostname: "host1", Job: "job1", State: registry.Attached},
- {Hostname: "host2", Job: "job1", State: registry.Attached},
- }
- assert.True(t, condition(event))
-
- event.New.Attachments = []registry.Attachment{
- {Hostname: "host1", Job: "job2", State: registry.AttachmentError},
- {Hostname: "host1", Job: "job1", State: registry.Detached},
- {Hostname: "host2", Job: "job1", State: registry.Attached},
- }
- assert.False(t, condition(event))
-
- event.New.Attachments = []registry.Attachment{
- {Hostname: "host1", Job: "job2", State: registry.Attached},
- {Hostname: "host1", Job: "job1", State: registry.AttachmentError},
- {Hostname: "host2", Job: "job1", State: registry.Attached},
- }
- assert.True(t, condition(event))
- return nil
- }
- mockVolReg.EXPECT().WaitForCondition(volume.Name, gomock.Any()).DoAndReturn(fakeWait)
-
- err := vlm.Mount(hosts, "job1")
- assert.Equal(t, "unable to mount volume: vol1", err.Error())
-}
-
-func TestVolumeLifecycleManager_Unmount(t *testing.T) {
- mockCtrl := gomock.NewController(t)
- defer mockCtrl.Finish()
- mockVolReg := mocks.NewMockVolumeRegistry(mockCtrl)
-
- volume := registry.Volume{
- Name: "vol1", SizeBricks: 3, State: registry.BricksProvisioned, JobName: "job1",
- Attachments: []registry.Attachment{
- {Hostname: "host1", Job: "job1", State: registry.Attached},
- {Hostname: "host2", Job: "job1", State: registry.Attached},
- {Hostname: "host1", Job: "job2"},
- }}
- vlm := NewVolumeLifecycleManager(mockVolReg, nil, volume)
- hosts := []string{"host1", "host2"}
-
- mockVolReg.EXPECT().UpdateVolumeAttachments(volume.Name, []registry.Attachment{
- {Hostname: "host1", State: registry.RequestDetach, Job: "job1"},
- {Hostname: "host2", State: registry.RequestDetach, Job: "job1"},
- })
- fakeWait := func(volumeName registry.VolumeName, condition func(event *registry.VolumeChange) bool) error {
- event := &registry.VolumeChange{New: &registry.Volume{}}
- event.New.Attachments = []registry.Attachment{
- {Hostname: "host1", Job: "job2"},
- {Hostname: "host1", Job: "job1", State: registry.Detached},
- {Hostname: "host2", Job: "job1", State: registry.Detached},
- }
- assert.True(t, condition(event))
-
- event.New.Attachments = []registry.Attachment{
- {Hostname: "host1", Job: "job2", State: registry.AttachmentError},
- {Hostname: "host1", Job: "job1", State: registry.Detached},
- {Hostname: "host2", Job: "job1", State: registry.Attached},
- }
- assert.False(t, condition(event))
-
- event.New.Attachments = []registry.Attachment{
- {Hostname: "host1", Job: "job2"},
- {Hostname: "host1", Job: "job1", State: registry.AttachmentError},
- {Hostname: "host2", Job: "job1", State: registry.Detached},
- }
- assert.True(t, condition(event))
- return nil
- }
- mockVolReg.EXPECT().WaitForCondition(volume.Name, gomock.Any()).DoAndReturn(fakeWait)
-
- err := vlm.Unmount(hosts, "job2")
- assert.Equal(t, "attachment must be attached to do unmount for volume: vol1", err.Error())
-
- err = vlm.Unmount(hosts, "job3")
- assert.Equal(t, "can't find attachment for volume: vol1 host: host1 job: job3", err.Error())
-
- err = vlm.Unmount(hosts, "job1")
- assert.Equal(t, "unable to unmount volume: vol1 because: attachment for host host1 in error state", err.Error())
-}
diff --git a/internal/pkg/mock_facade/session.go b/internal/pkg/mock_facade/session.go
new file mode 100644
index 00000000..0899939a
--- /dev/null
+++ b/internal/pkg/mock_facade/session.go
@@ -0,0 +1,178 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: internal/pkg/facade/session.go
+
+// Package mock_facade is a generated GoMock package.
+package mock_facade
+
+import (
+ datamodel "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ gomock "github.com/golang/mock/gomock"
+ reflect "reflect"
+)
+
+// MockSession is a mock of Session interface
+type MockSession struct {
+ ctrl *gomock.Controller
+ recorder *MockSessionMockRecorder
+}
+
+// MockSessionMockRecorder is the mock recorder for MockSession
+type MockSessionMockRecorder struct {
+ mock *MockSession
+}
+
+// NewMockSession creates a new mock instance
+func NewMockSession(ctrl *gomock.Controller) *MockSession {
+ mock := &MockSession{ctrl: ctrl}
+ mock.recorder = &MockSessionMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockSession) EXPECT() *MockSessionMockRecorder {
+ return m.recorder
+}
+
+// CreateSession mocks base method
+func (m *MockSession) CreateSession(session datamodel.Session) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateSession", session)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// CreateSession indicates an expected call of CreateSession
+func (mr *MockSessionMockRecorder) CreateSession(session interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSession", reflect.TypeOf((*MockSession)(nil).CreateSession), session)
+}
+
+// DeleteSession mocks base method
+func (m *MockSession) DeleteSession(sessionName datamodel.SessionName, hurry bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteSession", sessionName, hurry)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// DeleteSession indicates an expected call of DeleteSession
+func (mr *MockSessionMockRecorder) DeleteSession(sessionName, hurry interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSession", reflect.TypeOf((*MockSession)(nil).DeleteSession), sessionName, hurry)
+}
+
+// CopyDataIn mocks base method
+func (m *MockSession) CopyDataIn(sessionName datamodel.SessionName) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CopyDataIn", sessionName)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// CopyDataIn indicates an expected call of CopyDataIn
+func (mr *MockSessionMockRecorder) CopyDataIn(sessionName interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyDataIn", reflect.TypeOf((*MockSession)(nil).CopyDataIn), sessionName)
+}
+
+// Mount mocks base method
+func (m *MockSession) Mount(sessionName datamodel.SessionName, computeNodes, loginNodes []string) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Mount", sessionName, computeNodes, loginNodes)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Mount indicates an expected call of Mount
+func (mr *MockSessionMockRecorder) Mount(sessionName, computeNodes, loginNodes interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Mount", reflect.TypeOf((*MockSession)(nil).Mount), sessionName, computeNodes, loginNodes)
+}
+
+// Unmount mocks base method
+func (m *MockSession) Unmount(sessionName datamodel.SessionName) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Unmount", sessionName)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Unmount indicates an expected call of Unmount
+func (mr *MockSessionMockRecorder) Unmount(sessionName interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unmount", reflect.TypeOf((*MockSession)(nil).Unmount), sessionName)
+}
+
+// CopyDataOut mocks base method
+func (m *MockSession) CopyDataOut(sessionName datamodel.SessionName) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CopyDataOut", sessionName)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// CopyDataOut indicates an expected call of CopyDataOut
+func (mr *MockSessionMockRecorder) CopyDataOut(sessionName interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyDataOut", reflect.TypeOf((*MockSession)(nil).CopyDataOut), sessionName)
+}
+
+// GetPools mocks base method
+func (m *MockSession) GetPools() ([]datamodel.PoolInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetPools")
+ ret0, _ := ret[0].([]datamodel.PoolInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetPools indicates an expected call of GetPools
+func (mr *MockSessionMockRecorder) GetPools() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPools", reflect.TypeOf((*MockSession)(nil).GetPools))
+}
+
+// GetSession mocks base method
+func (m *MockSession) GetSession(sessionName datamodel.SessionName) (datamodel.Session, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetSession", sessionName)
+ ret0, _ := ret[0].(datamodel.Session)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetSession indicates an expected call of GetSession
+func (mr *MockSessionMockRecorder) GetSession(sessionName interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSession", reflect.TypeOf((*MockSession)(nil).GetSession), sessionName)
+}
+
+// GetAllSessions mocks base method
+func (m *MockSession) GetAllSessions() ([]datamodel.Session, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetAllSessions")
+ ret0, _ := ret[0].([]datamodel.Session)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetAllSessions indicates an expected call of GetAllSessions
+func (mr *MockSessionMockRecorder) GetAllSessions() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllSessions", reflect.TypeOf((*MockSession)(nil).GetAllSessions))
+}
+
+// GenerateAnsible mocks base method
+func (m *MockSession) GenerateAnsible(sessionName datamodel.SessionName) (string, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GenerateAnsible", sessionName)
+ ret0, _ := ret[0].(string)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GenerateAnsible indicates an expected call of GenerateAnsible
+func (mr *MockSessionMockRecorder) GenerateAnsible(sessionName interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateAnsible", reflect.TypeOf((*MockSession)(nil).GenerateAnsible), sessionName)
+}
diff --git a/internal/pkg/mock_facade/session_action_handler.go b/internal/pkg/mock_facade/session_action_handler.go
new file mode 100644
index 00000000..61626363
--- /dev/null
+++ b/internal/pkg/mock_facade/session_action_handler.go
@@ -0,0 +1,58 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: internal/pkg/facade/session_action_handler.go
+
+// Package mock_facade is a generated GoMock package.
+package mock_facade
+
+import (
+ datamodel "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ gomock "github.com/golang/mock/gomock"
+ reflect "reflect"
+)
+
+// MockSessionActionHandler is a mock of SessionActionHandler interface
+type MockSessionActionHandler struct {
+ ctrl *gomock.Controller
+ recorder *MockSessionActionHandlerMockRecorder
+}
+
+// MockSessionActionHandlerMockRecorder is the mock recorder for MockSessionActionHandler
+type MockSessionActionHandlerMockRecorder struct {
+ mock *MockSessionActionHandler
+}
+
+// NewMockSessionActionHandler creates a new mock instance
+func NewMockSessionActionHandler(ctrl *gomock.Controller) *MockSessionActionHandler {
+ mock := &MockSessionActionHandler{ctrl: ctrl}
+ mock.recorder = &MockSessionActionHandlerMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockSessionActionHandler) EXPECT() *MockSessionActionHandlerMockRecorder {
+ return m.recorder
+}
+
+// ProcessSessionAction mocks base method
+func (m *MockSessionActionHandler) ProcessSessionAction(action datamodel.SessionAction) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "ProcessSessionAction", action)
+}
+
+// ProcessSessionAction indicates an expected call of ProcessSessionAction
+func (mr *MockSessionActionHandlerMockRecorder) ProcessSessionAction(action interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProcessSessionAction", reflect.TypeOf((*MockSessionActionHandler)(nil).ProcessSessionAction), action)
+}
+
+// RestoreSession mocks base method
+func (m *MockSessionActionHandler) RestoreSession(session datamodel.Session) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "RestoreSession", session)
+}
+
+// RestoreSession indicates an expected call of RestoreSession
+func (mr *MockSessionActionHandlerMockRecorder) RestoreSession(session interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreSession", reflect.TypeOf((*MockSessionActionHandler)(nil).RestoreSession), session)
+}
diff --git a/internal/pkg/mocks/disk_mock.go b/internal/pkg/mock_fileio/disk_mock.go
similarity index 95%
rename from internal/pkg/mocks/disk_mock.go
rename to internal/pkg/mock_fileio/disk_mock.go
index 115e16e3..78beb650 100644
--- a/internal/pkg/mocks/disk_mock.go
+++ b/internal/pkg/mock_fileio/disk_mock.go
@@ -1,8 +1,8 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: internal/pkg/fileio/disk.go
-// Package mocks is a generated GoMock package.
-package mocks
+// Package mock_fileio is a generated GoMock package.
+package mock_fileio
import (
gomock "github.com/golang/mock/gomock"
diff --git a/internal/pkg/mock_filesystem/ansible.go b/internal/pkg/mock_filesystem/ansible.go
new file mode 100644
index 00000000..15dd8ade
--- /dev/null
+++ b/internal/pkg/mock_filesystem/ansible.go
@@ -0,0 +1,49 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: internal/pkg/filesystem/ansible.go
+
+// Package mock_filesystem is a generated GoMock package.
+package mock_filesystem
+
+import (
+ datamodel "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ gomock "github.com/golang/mock/gomock"
+ reflect "reflect"
+)
+
+// MockAnsible is a mock of Ansible interface
+type MockAnsible struct {
+ ctrl *gomock.Controller
+ recorder *MockAnsibleMockRecorder
+}
+
+// MockAnsibleMockRecorder is the mock recorder for MockAnsible
+type MockAnsibleMockRecorder struct {
+ mock *MockAnsible
+}
+
+// NewMockAnsible creates a new mock instance
+func NewMockAnsible(ctrl *gomock.Controller) *MockAnsible {
+ mock := &MockAnsible{ctrl: ctrl}
+ mock.recorder = &MockAnsibleMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockAnsible) EXPECT() *MockAnsibleMockRecorder {
+ return m.recorder
+}
+
+// CreateEnvironment mocks base method
+func (m *MockAnsible) CreateEnvironment(session datamodel.Session) (string, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateEnvironment", session)
+ ret0, _ := ret[0].(string)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateEnvironment indicates an expected call of CreateEnvironment
+func (mr *MockAnsibleMockRecorder) CreateEnvironment(session interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateEnvironment", reflect.TypeOf((*MockAnsible)(nil).CreateEnvironment), session)
+}
diff --git a/internal/pkg/mock_filesystem/provider.go b/internal/pkg/mock_filesystem/provider.go
new file mode 100644
index 00000000..ec65fa8d
--- /dev/null
+++ b/internal/pkg/mock_filesystem/provider.go
@@ -0,0 +1,133 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: internal/pkg/filesystem/provider.go
+
+// Package mock_filesystem is a generated GoMock package.
+package mock_filesystem
+
+import (
+ datamodel "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ gomock "github.com/golang/mock/gomock"
+ reflect "reflect"
+)
+
+// MockProvider is a mock of Provider interface
+type MockProvider struct {
+ ctrl *gomock.Controller
+ recorder *MockProviderMockRecorder
+}
+
+// MockProviderMockRecorder is the mock recorder for MockProvider
+type MockProviderMockRecorder struct {
+ mock *MockProvider
+}
+
+// NewMockProvider creates a new mock instance
+func NewMockProvider(ctrl *gomock.Controller) *MockProvider {
+ mock := &MockProvider{ctrl: ctrl}
+ mock.recorder = &MockProviderMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockProvider) EXPECT() *MockProviderMockRecorder {
+ return m.recorder
+}
+
+// Create mocks base method
+func (m *MockProvider) Create(session datamodel.Session) (datamodel.FilesystemStatus, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Create", session)
+ ret0, _ := ret[0].(datamodel.FilesystemStatus)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Create indicates an expected call of Create
+func (mr *MockProviderMockRecorder) Create(session interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockProvider)(nil).Create), session)
+}
+
+// Restore mocks base method
+func (m *MockProvider) Restore(session datamodel.Session) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Restore", session)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Restore indicates an expected call of Restore
+func (mr *MockProviderMockRecorder) Restore(session interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Restore", reflect.TypeOf((*MockProvider)(nil).Restore), session)
+}
+
+// Delete mocks base method
+func (m *MockProvider) Delete(session datamodel.Session) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Delete", session)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Delete indicates an expected call of Delete
+func (mr *MockProviderMockRecorder) Delete(session interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockProvider)(nil).Delete), session)
+}
+
+// DataCopyIn mocks base method
+func (m *MockProvider) DataCopyIn(session datamodel.Session) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DataCopyIn", session)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// DataCopyIn indicates an expected call of DataCopyIn
+func (mr *MockProviderMockRecorder) DataCopyIn(session interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DataCopyIn", reflect.TypeOf((*MockProvider)(nil).DataCopyIn), session)
+}
+
+// DataCopyOut mocks base method
+func (m *MockProvider) DataCopyOut(session datamodel.Session) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DataCopyOut", session)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// DataCopyOut indicates an expected call of DataCopyOut
+func (mr *MockProviderMockRecorder) DataCopyOut(session interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DataCopyOut", reflect.TypeOf((*MockProvider)(nil).DataCopyOut), session)
+}
+
+// Mount mocks base method
+func (m *MockProvider) Mount(session datamodel.Session, attachments datamodel.AttachmentSessionStatus) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Mount", session, attachments)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Mount indicates an expected call of Mount
+func (mr *MockProviderMockRecorder) Mount(session, attachments interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Mount", reflect.TypeOf((*MockProvider)(nil).Mount), session, attachments)
+}
+
+// Unmount mocks base method
+func (m *MockProvider) Unmount(session datamodel.Session, attachments datamodel.AttachmentSessionStatus) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Unmount", session, attachments)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Unmount indicates an expected call of Unmount
+func (mr *MockProviderMockRecorder) Unmount(session, attachments interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unmount", reflect.TypeOf((*MockProvider)(nil).Unmount), session, attachments)
+}
diff --git a/internal/pkg/mock_registry/brick_allocation.go b/internal/pkg/mock_registry/brick_allocation.go
new file mode 100644
index 00000000..74cfebbd
--- /dev/null
+++ b/internal/pkg/mock_registry/brick_allocation.go
@@ -0,0 +1,110 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: internal/pkg/registry/brick_allocation.go
+
+// Package mock_registry is a generated GoMock package.
+package mock_registry
+
+import (
+ datamodel "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ store "github.com/RSE-Cambridge/data-acc/internal/pkg/store"
+ gomock "github.com/golang/mock/gomock"
+ reflect "reflect"
+)
+
+// MockAllocationRegistry is a mock of AllocationRegistry interface
+type MockAllocationRegistry struct {
+ ctrl *gomock.Controller
+ recorder *MockAllocationRegistryMockRecorder
+}
+
+// MockAllocationRegistryMockRecorder is the mock recorder for MockAllocationRegistry
+type MockAllocationRegistryMockRecorder struct {
+ mock *MockAllocationRegistry
+}
+
+// NewMockAllocationRegistry creates a new mock instance
+func NewMockAllocationRegistry(ctrl *gomock.Controller) *MockAllocationRegistry {
+ mock := &MockAllocationRegistry{ctrl: ctrl}
+ mock.recorder = &MockAllocationRegistryMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockAllocationRegistry) EXPECT() *MockAllocationRegistryMockRecorder {
+ return m.recorder
+}
+
+// GetAllocationMutex mocks base method
+func (m *MockAllocationRegistry) GetAllocationMutex() (store.Mutex, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetAllocationMutex")
+ ret0, _ := ret[0].(store.Mutex)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetAllocationMutex indicates an expected call of GetAllocationMutex
+func (mr *MockAllocationRegistryMockRecorder) GetAllocationMutex() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllocationMutex", reflect.TypeOf((*MockAllocationRegistry)(nil).GetAllocationMutex))
+}
+
+// GetPool mocks base method
+func (m *MockAllocationRegistry) GetPool(name datamodel.PoolName) (datamodel.Pool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetPool", name)
+ ret0, _ := ret[0].(datamodel.Pool)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetPool indicates an expected call of GetPool
+func (mr *MockAllocationRegistryMockRecorder) GetPool(name interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPool", reflect.TypeOf((*MockAllocationRegistry)(nil).GetPool), name)
+}
+
+// EnsurePoolCreated mocks base method
+func (m *MockAllocationRegistry) EnsurePoolCreated(poolName datamodel.PoolName, granularityBytes uint) (datamodel.Pool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "EnsurePoolCreated", poolName, granularityBytes)
+ ret0, _ := ret[0].(datamodel.Pool)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// EnsurePoolCreated indicates an expected call of EnsurePoolCreated
+func (mr *MockAllocationRegistryMockRecorder) EnsurePoolCreated(poolName, granularityBytes interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsurePoolCreated", reflect.TypeOf((*MockAllocationRegistry)(nil).EnsurePoolCreated), poolName, granularityBytes)
+}
+
+// GetAllPoolInfos mocks base method
+func (m *MockAllocationRegistry) GetAllPoolInfos() ([]datamodel.PoolInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetAllPoolInfos")
+ ret0, _ := ret[0].([]datamodel.PoolInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetAllPoolInfos indicates an expected call of GetAllPoolInfos
+func (mr *MockAllocationRegistryMockRecorder) GetAllPoolInfos() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllPoolInfos", reflect.TypeOf((*MockAllocationRegistry)(nil).GetAllPoolInfos))
+}
+
+// GetPoolInfo mocks base method
+func (m *MockAllocationRegistry) GetPoolInfo(poolName datamodel.PoolName) (datamodel.PoolInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetPoolInfo", poolName)
+ ret0, _ := ret[0].(datamodel.PoolInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetPoolInfo indicates an expected call of GetPoolInfo
+func (mr *MockAllocationRegistryMockRecorder) GetPoolInfo(poolName interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPoolInfo", reflect.TypeOf((*MockAllocationRegistry)(nil).GetPoolInfo), poolName)
+}
diff --git a/internal/pkg/mock_registry/brick_host.go b/internal/pkg/mock_registry/brick_host.go
new file mode 100644
index 00000000..a1251a7f
--- /dev/null
+++ b/internal/pkg/mock_registry/brick_host.go
@@ -0,0 +1,93 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: internal/pkg/registry/brick_host.go
+
+// Package mock_registry is a generated GoMock package.
+package mock_registry
+
+import (
+ context "context"
+ datamodel "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ gomock "github.com/golang/mock/gomock"
+ reflect "reflect"
+)
+
+// MockBrickHostRegistry is a mock of BrickHostRegistry interface
+type MockBrickHostRegistry struct {
+ ctrl *gomock.Controller
+ recorder *MockBrickHostRegistryMockRecorder
+}
+
+// MockBrickHostRegistryMockRecorder is the mock recorder for MockBrickHostRegistry
+type MockBrickHostRegistryMockRecorder struct {
+ mock *MockBrickHostRegistry
+}
+
+// NewMockBrickHostRegistry creates a new mock instance
+func NewMockBrickHostRegistry(ctrl *gomock.Controller) *MockBrickHostRegistry {
+ mock := &MockBrickHostRegistry{ctrl: ctrl}
+ mock.recorder = &MockBrickHostRegistryMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockBrickHostRegistry) EXPECT() *MockBrickHostRegistryMockRecorder {
+ return m.recorder
+}
+
+// UpdateBrickHost mocks base method
+func (m *MockBrickHostRegistry) UpdateBrickHost(brickHostInfo datamodel.BrickHost) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateBrickHost", brickHostInfo)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// UpdateBrickHost indicates an expected call of UpdateBrickHost
+func (mr *MockBrickHostRegistryMockRecorder) UpdateBrickHost(brickHostInfo interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateBrickHost", reflect.TypeOf((*MockBrickHostRegistry)(nil).UpdateBrickHost), brickHostInfo)
+}
+
+// GetAllBrickHosts mocks base method
+func (m *MockBrickHostRegistry) GetAllBrickHosts() ([]datamodel.BrickHost, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetAllBrickHosts")
+ ret0, _ := ret[0].([]datamodel.BrickHost)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetAllBrickHosts indicates an expected call of GetAllBrickHosts
+func (mr *MockBrickHostRegistryMockRecorder) GetAllBrickHosts() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllBrickHosts", reflect.TypeOf((*MockBrickHostRegistry)(nil).GetAllBrickHosts))
+}
+
+// KeepAliveHost mocks base method
+func (m *MockBrickHostRegistry) KeepAliveHost(ctxt context.Context, brickHostName datamodel.BrickHostName) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "KeepAliveHost", ctxt, brickHostName)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// KeepAliveHost indicates an expected call of KeepAliveHost
+func (mr *MockBrickHostRegistryMockRecorder) KeepAliveHost(ctxt, brickHostName interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeepAliveHost", reflect.TypeOf((*MockBrickHostRegistry)(nil).KeepAliveHost), ctxt, brickHostName)
+}
+
+// IsBrickHostAlive mocks base method
+func (m *MockBrickHostRegistry) IsBrickHostAlive(brickHostName datamodel.BrickHostName) (bool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IsBrickHostAlive", brickHostName)
+ ret0, _ := ret[0].(bool)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// IsBrickHostAlive indicates an expected call of IsBrickHostAlive
+func (mr *MockBrickHostRegistryMockRecorder) IsBrickHostAlive(brickHostName interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsBrickHostAlive", reflect.TypeOf((*MockBrickHostRegistry)(nil).IsBrickHostAlive), brickHostName)
+}
diff --git a/internal/pkg/mock_registry/session.go b/internal/pkg/mock_registry/session.go
new file mode 100644
index 00000000..cfd289ad
--- /dev/null
+++ b/internal/pkg/mock_registry/session.go
@@ -0,0 +1,124 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: internal/pkg/registry/session.go
+
+// Package mock_registry is a generated GoMock package.
+package mock_registry
+
+import (
+ datamodel "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ store "github.com/RSE-Cambridge/data-acc/internal/pkg/store"
+ gomock "github.com/golang/mock/gomock"
+ reflect "reflect"
+)
+
+// MockSessionRegistry is a mock of SessionRegistry interface
+type MockSessionRegistry struct {
+ ctrl *gomock.Controller
+ recorder *MockSessionRegistryMockRecorder
+}
+
+// MockSessionRegistryMockRecorder is the mock recorder for MockSessionRegistry
+type MockSessionRegistryMockRecorder struct {
+ mock *MockSessionRegistry
+}
+
+// NewMockSessionRegistry creates a new mock instance
+func NewMockSessionRegistry(ctrl *gomock.Controller) *MockSessionRegistry {
+ mock := &MockSessionRegistry{ctrl: ctrl}
+ mock.recorder = &MockSessionRegistryMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockSessionRegistry) EXPECT() *MockSessionRegistryMockRecorder {
+ return m.recorder
+}
+
+// GetSessionMutex mocks base method
+func (m *MockSessionRegistry) GetSessionMutex(sessionName datamodel.SessionName) (store.Mutex, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetSessionMutex", sessionName)
+ ret0, _ := ret[0].(store.Mutex)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetSessionMutex indicates an expected call of GetSessionMutex
+func (mr *MockSessionRegistryMockRecorder) GetSessionMutex(sessionName interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSessionMutex", reflect.TypeOf((*MockSessionRegistry)(nil).GetSessionMutex), sessionName)
+}
+
+// CreateSession mocks base method
+func (m *MockSessionRegistry) CreateSession(session datamodel.Session) (datamodel.Session, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateSession", session)
+ ret0, _ := ret[0].(datamodel.Session)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateSession indicates an expected call of CreateSession
+func (mr *MockSessionRegistryMockRecorder) CreateSession(session interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSession", reflect.TypeOf((*MockSessionRegistry)(nil).CreateSession), session)
+}
+
+// GetSession mocks base method
+func (m *MockSessionRegistry) GetSession(sessionName datamodel.SessionName) (datamodel.Session, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetSession", sessionName)
+ ret0, _ := ret[0].(datamodel.Session)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetSession indicates an expected call of GetSession
+func (mr *MockSessionRegistryMockRecorder) GetSession(sessionName interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSession", reflect.TypeOf((*MockSessionRegistry)(nil).GetSession), sessionName)
+}
+
+// GetAllSessions mocks base method
+func (m *MockSessionRegistry) GetAllSessions() ([]datamodel.Session, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetAllSessions")
+ ret0, _ := ret[0].([]datamodel.Session)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetAllSessions indicates an expected call of GetAllSessions
+func (mr *MockSessionRegistryMockRecorder) GetAllSessions() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllSessions", reflect.TypeOf((*MockSessionRegistry)(nil).GetAllSessions))
+}
+
+// UpdateSession mocks base method
+func (m *MockSessionRegistry) UpdateSession(session datamodel.Session) (datamodel.Session, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateSession", session)
+ ret0, _ := ret[0].(datamodel.Session)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateSession indicates an expected call of UpdateSession
+func (mr *MockSessionRegistryMockRecorder) UpdateSession(session interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSession", reflect.TypeOf((*MockSessionRegistry)(nil).UpdateSession), session)
+}
+
+// DeleteSession mocks base method
+func (m *MockSessionRegistry) DeleteSession(session datamodel.Session) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteSession", session)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// DeleteSession indicates an expected call of DeleteSession
+func (mr *MockSessionRegistryMockRecorder) DeleteSession(session interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSession", reflect.TypeOf((*MockSessionRegistry)(nil).DeleteSession), session)
+}
diff --git a/internal/pkg/mock_registry/session_actions.go b/internal/pkg/mock_registry/session_actions.go
new file mode 100644
index 00000000..40d73269
--- /dev/null
+++ b/internal/pkg/mock_registry/session_actions.go
@@ -0,0 +1,94 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: internal/pkg/registry/session_actions.go
+
+// Package mock_registry is a generated GoMock package.
+package mock_registry
+
+import (
+ context "context"
+ datamodel "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ gomock "github.com/golang/mock/gomock"
+ reflect "reflect"
+)
+
+// MockSessionActions is a mock of SessionActions interface
+type MockSessionActions struct {
+ ctrl *gomock.Controller
+ recorder *MockSessionActionsMockRecorder
+}
+
+// MockSessionActionsMockRecorder is the mock recorder for MockSessionActions
+type MockSessionActionsMockRecorder struct {
+ mock *MockSessionActions
+}
+
+// NewMockSessionActions creates a new mock instance
+func NewMockSessionActions(ctrl *gomock.Controller) *MockSessionActions {
+ mock := &MockSessionActions{ctrl: ctrl}
+ mock.recorder = &MockSessionActionsMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockSessionActions) EXPECT() *MockSessionActionsMockRecorder {
+ return m.recorder
+}
+
+// SendSessionAction mocks base method
+func (m *MockSessionActions) SendSessionAction(ctxt context.Context, actionType datamodel.SessionActionType, session datamodel.Session) (<-chan datamodel.SessionAction, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SendSessionAction", ctxt, actionType, session)
+ ret0, _ := ret[0].(<-chan datamodel.SessionAction)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// SendSessionAction indicates an expected call of SendSessionAction
+func (mr *MockSessionActionsMockRecorder) SendSessionAction(ctxt, actionType, session interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendSessionAction", reflect.TypeOf((*MockSessionActions)(nil).SendSessionAction), ctxt, actionType, session)
+}
+
+// GetSessionActionRequests mocks base method
+func (m *MockSessionActions) GetSessionActionRequests(ctxt context.Context, brickHostName datamodel.BrickHostName) (<-chan datamodel.SessionAction, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetSessionActionRequests", ctxt, brickHostName)
+ ret0, _ := ret[0].(<-chan datamodel.SessionAction)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetSessionActionRequests indicates an expected call of GetSessionActionRequests
+func (mr *MockSessionActionsMockRecorder) GetSessionActionRequests(ctxt, brickHostName interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSessionActionRequests", reflect.TypeOf((*MockSessionActions)(nil).GetSessionActionRequests), ctxt, brickHostName)
+}
+
+// GetOutstandingSessionActionRequests mocks base method
+func (m *MockSessionActions) GetOutstandingSessionActionRequests(brickHostName datamodel.BrickHostName) ([]datamodel.SessionAction, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetOutstandingSessionActionRequests", brickHostName)
+ ret0, _ := ret[0].([]datamodel.SessionAction)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetOutstandingSessionActionRequests indicates an expected call of GetOutstandingSessionActionRequests
+func (mr *MockSessionActionsMockRecorder) GetOutstandingSessionActionRequests(brickHostName interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOutstandingSessionActionRequests", reflect.TypeOf((*MockSessionActions)(nil).GetOutstandingSessionActionRequests), brickHostName)
+}
+
+// CompleteSessionAction mocks base method
+func (m *MockSessionActions) CompleteSessionAction(action datamodel.SessionAction) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CompleteSessionAction", action)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// CompleteSessionAction indicates an expected call of CompleteSessionAction
+func (mr *MockSessionActionsMockRecorder) CompleteSessionAction(action interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteSessionAction", reflect.TypeOf((*MockSessionActions)(nil).CompleteSessionAction), action)
+}
diff --git a/internal/pkg/mock_store/keystore.go b/internal/pkg/mock_store/keystore.go
new file mode 100644
index 00000000..b3ff0e4a
--- /dev/null
+++ b/internal/pkg/mock_store/keystore.go
@@ -0,0 +1,247 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: internal/pkg/store/keystore.go
+
+// Package mock_store is a generated GoMock package.
+package mock_store
+
+import (
+ context "context"
+ store "github.com/RSE-Cambridge/data-acc/internal/pkg/store"
+ gomock "github.com/golang/mock/gomock"
+ reflect "reflect"
+)
+
+// MockKeystore is a mock of Keystore interface
+type MockKeystore struct {
+ ctrl *gomock.Controller
+ recorder *MockKeystoreMockRecorder
+}
+
+// MockKeystoreMockRecorder is the mock recorder for MockKeystore
+type MockKeystoreMockRecorder struct {
+ mock *MockKeystore
+}
+
+// NewMockKeystore creates a new mock instance
+func NewMockKeystore(ctrl *gomock.Controller) *MockKeystore {
+ mock := &MockKeystore{ctrl: ctrl}
+ mock.recorder = &MockKeystoreMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockKeystore) EXPECT() *MockKeystoreMockRecorder {
+ return m.recorder
+}
+
+// Close mocks base method
+func (m *MockKeystore) Close() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Close")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Close indicates an expected call of Close
+func (mr *MockKeystoreMockRecorder) Close() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockKeystore)(nil).Close))
+}
+
+// Create mocks base method
+func (m *MockKeystore) Create(key string, value []byte) (int64, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Create", key, value)
+ ret0, _ := ret[0].(int64)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Create indicates an expected call of Create
+func (mr *MockKeystoreMockRecorder) Create(key, value interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockKeystore)(nil).Create), key, value)
+}
+
+// Update mocks base method
+func (m *MockKeystore) Update(key string, value []byte, modRevision int64) (int64, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Update", key, value, modRevision)
+ ret0, _ := ret[0].(int64)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Update indicates an expected call of Update
+func (mr *MockKeystoreMockRecorder) Update(key, value, modRevision interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockKeystore)(nil).Update), key, value, modRevision)
+}
+
+// Delete mocks base method
+func (m *MockKeystore) Delete(key string, modRevision int64) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Delete", key, modRevision)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Delete indicates an expected call of Delete
+func (mr *MockKeystoreMockRecorder) Delete(key, modRevision interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockKeystore)(nil).Delete), key, modRevision)
+}
+
+// DeleteAllKeysWithPrefix mocks base method
+func (m *MockKeystore) DeleteAllKeysWithPrefix(keyPrefix string) (int64, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteAllKeysWithPrefix", keyPrefix)
+ ret0, _ := ret[0].(int64)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteAllKeysWithPrefix indicates an expected call of DeleteAllKeysWithPrefix
+func (mr *MockKeystoreMockRecorder) DeleteAllKeysWithPrefix(keyPrefix interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAllKeysWithPrefix", reflect.TypeOf((*MockKeystore)(nil).DeleteAllKeysWithPrefix), keyPrefix)
+}
+
+// GetAll mocks base method
+func (m *MockKeystore) GetAll(keyPrefix string) ([]store.KeyValueVersion, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetAll", keyPrefix)
+ ret0, _ := ret[0].([]store.KeyValueVersion)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetAll indicates an expected call of GetAll
+func (mr *MockKeystoreMockRecorder) GetAll(keyPrefix interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAll", reflect.TypeOf((*MockKeystore)(nil).GetAll), keyPrefix)
+}
+
+// Get mocks base method
+func (m *MockKeystore) Get(key string) (store.KeyValueVersion, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Get", key)
+ ret0, _ := ret[0].(store.KeyValueVersion)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Get indicates an expected call of Get
+func (mr *MockKeystoreMockRecorder) Get(key interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockKeystore)(nil).Get), key)
+}
+
+// IsExist mocks base method
+func (m *MockKeystore) IsExist(key string) (bool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IsExist", key)
+ ret0, _ := ret[0].(bool)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// IsExist indicates an expected call of IsExist
+func (mr *MockKeystoreMockRecorder) IsExist(key interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsExist", reflect.TypeOf((*MockKeystore)(nil).IsExist), key)
+}
+
+// Watch mocks base method
+func (m *MockKeystore) Watch(ctxt context.Context, key string, withPrefix bool) store.KeyValueUpdateChan {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Watch", ctxt, key, withPrefix)
+ ret0, _ := ret[0].(store.KeyValueUpdateChan)
+ return ret0
+}
+
+// Watch indicates an expected call of Watch
+func (mr *MockKeystoreMockRecorder) Watch(ctxt, key, withPrefix interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Watch", reflect.TypeOf((*MockKeystore)(nil).Watch), ctxt, key, withPrefix)
+}
+
+// KeepAliveKey mocks base method
+func (m *MockKeystore) KeepAliveKey(ctxt context.Context, key string) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "KeepAliveKey", ctxt, key)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// KeepAliveKey indicates an expected call of KeepAliveKey
+func (mr *MockKeystoreMockRecorder) KeepAliveKey(ctxt, key interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeepAliveKey", reflect.TypeOf((*MockKeystore)(nil).KeepAliveKey), ctxt, key)
+}
+
+// NewMutex mocks base method
+func (m *MockKeystore) NewMutex(lockKey string) (store.Mutex, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NewMutex", lockKey)
+ ret0, _ := ret[0].(store.Mutex)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NewMutex indicates an expected call of NewMutex
+func (mr *MockKeystoreMockRecorder) NewMutex(lockKey interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewMutex", reflect.TypeOf((*MockKeystore)(nil).NewMutex), lockKey)
+}
+
+// MockMutex is a mock of Mutex interface
+type MockMutex struct {
+ ctrl *gomock.Controller
+ recorder *MockMutexMockRecorder
+}
+
+// MockMutexMockRecorder is the mock recorder for MockMutex
+type MockMutexMockRecorder struct {
+ mock *MockMutex
+}
+
+// NewMockMutex creates a new mock instance
+func NewMockMutex(ctrl *gomock.Controller) *MockMutex {
+ mock := &MockMutex{ctrl: ctrl}
+ mock.recorder = &MockMutexMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockMutex) EXPECT() *MockMutexMockRecorder {
+ return m.recorder
+}
+
+// Lock mocks base method
+func (m *MockMutex) Lock(ctx context.Context) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Lock", ctx)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Lock indicates an expected call of Lock
+func (mr *MockMutexMockRecorder) Lock(ctx interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Lock", reflect.TypeOf((*MockMutex)(nil).Lock), ctx)
+}
+
+// Unlock mocks base method
+func (m *MockMutex) Unlock(ctx context.Context) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Unlock", ctx)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Unlock indicates an expected call of Unlock
+func (mr *MockMutexMockRecorder) Unlock(ctx interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unlock", reflect.TypeOf((*MockMutex)(nil).Unlock), ctx)
+}
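
The added mocks above are driven through gomock's recorder: EXPECT() records
the calls a test anticipates, and Finish() verifies they all happened. A
minimal sketch of a test using the new MockKeystore follows (not part of this
diff); the mock_store import path, the test name, and the key are assumptions.

package example_test

import (
	"testing"

	"github.com/RSE-Cambridge/data-acc/internal/pkg/mock_store"
	"github.com/RSE-Cambridge/data-acc/internal/pkg/store"
	"github.com/golang/mock/gomock"
)

func TestKeystoreGet(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish() // fails the test if an expected call never happens

	keystore := mock_store.NewMockKeystore(ctrl)
	// Expect exactly one Get("somekey") and stub its return values.
	keystore.EXPECT().Get("somekey").Return(store.KeyValueVersion{}, nil)

	kvv, err := keystore.Get("somekey")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	_ = kvv // a real test would assert on the returned KeyValueVersion
}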
diff --git a/internal/pkg/mocks/job_mock.go b/internal/pkg/mocks/job_mock.go
deleted file mode 100644
index 478a1b56..00000000
--- a/internal/pkg/mocks/job_mock.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Code generated by MockGen. DO NOT EDIT.
-// Source: internal/pkg/dacctl/job.go
-
-// Package mocks is a generated GoMock package.
-package mocks
-
-import (
- gomock "github.com/golang/mock/gomock"
-)
-
-// MockjobCommand is a mock of jobCommand interface
-type MockjobCommand struct {
- ctrl *gomock.Controller
- recorder *MockjobCommandMockRecorder
-}
-
-// MockjobCommandMockRecorder is the mock recorder for MockjobCommand
-type MockjobCommandMockRecorder struct {
- mock *MockjobCommand
-}
-
-// NewMockjobCommand creates a new mock instance
-func NewMockjobCommand(ctrl *gomock.Controller) *MockjobCommand {
- mock := &MockjobCommand{ctrl: ctrl}
- mock.recorder = &MockjobCommandMockRecorder{mock}
- return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use
-func (m *MockjobCommand) EXPECT() *MockjobCommandMockRecorder {
- return m.recorder
-}
diff --git a/internal/pkg/mocks/pfsprovider_mock.go b/internal/pkg/mocks/pfsprovider_mock.go
deleted file mode 100644
index 26fa23ca..00000000
--- a/internal/pkg/mocks/pfsprovider_mock.go
+++ /dev/null
@@ -1,193 +0,0 @@
-// Code generated by MockGen. DO NOT EDIT.
-// Source: internal/pkg/pfsprovider/interface.go
-
-// Package mocks is a generated GoMock package.
-package mocks
-
-import (
- pfsprovider "github.com/RSE-Cambridge/data-acc/internal/pkg/pfsprovider"
- registry "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- gomock "github.com/golang/mock/gomock"
- reflect "reflect"
-)
-
-// MockPlugin is a mock of Plugin interface
-type MockPlugin struct {
- ctrl *gomock.Controller
- recorder *MockPluginMockRecorder
-}
-
-// MockPluginMockRecorder is the mock recorder for MockPlugin
-type MockPluginMockRecorder struct {
- mock *MockPlugin
-}
-
-// NewMockPlugin creates a new mock instance
-func NewMockPlugin(ctrl *gomock.Controller) *MockPlugin {
- mock := &MockPlugin{ctrl: ctrl}
- mock.recorder = &MockPluginMockRecorder{mock}
- return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use
-func (m *MockPlugin) EXPECT() *MockPluginMockRecorder {
- return m.recorder
-}
-
-// Mounter mocks base method
-func (m *MockPlugin) Mounter() pfsprovider.Mounter {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Mounter")
- ret0, _ := ret[0].(pfsprovider.Mounter)
- return ret0
-}
-
-// Mounter indicates an expected call of Mounter
-func (mr *MockPluginMockRecorder) Mounter() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Mounter", reflect.TypeOf((*MockPlugin)(nil).Mounter))
-}
-
-// VolumeProvider mocks base method
-func (m *MockPlugin) VolumeProvider() pfsprovider.VolumeProvider {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "VolumeProvider")
- ret0, _ := ret[0].(pfsprovider.VolumeProvider)
- return ret0
-}
-
-// VolumeProvider indicates an expected call of VolumeProvider
-func (mr *MockPluginMockRecorder) VolumeProvider() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VolumeProvider", reflect.TypeOf((*MockPlugin)(nil).VolumeProvider))
-}
-
-// MockVolumeProvider is a mock of VolumeProvider interface
-type MockVolumeProvider struct {
- ctrl *gomock.Controller
- recorder *MockVolumeProviderMockRecorder
-}
-
-// MockVolumeProviderMockRecorder is the mock recorder for MockVolumeProvider
-type MockVolumeProviderMockRecorder struct {
- mock *MockVolumeProvider
-}
-
-// NewMockVolumeProvider creates a new mock instance
-func NewMockVolumeProvider(ctrl *gomock.Controller) *MockVolumeProvider {
- mock := &MockVolumeProvider{ctrl: ctrl}
- mock.recorder = &MockVolumeProviderMockRecorder{mock}
- return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use
-func (m *MockVolumeProvider) EXPECT() *MockVolumeProviderMockRecorder {
- return m.recorder
-}
-
-// SetupVolume mocks base method
-func (m *MockVolumeProvider) SetupVolume(volume registry.Volume, brickAllocations []registry.BrickAllocation) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "SetupVolume", volume, brickAllocations)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// SetupVolume indicates an expected call of SetupVolume
-func (mr *MockVolumeProviderMockRecorder) SetupVolume(volume, brickAllocations interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetupVolume", reflect.TypeOf((*MockVolumeProvider)(nil).SetupVolume), volume, brickAllocations)
-}
-
-// TeardownVolume mocks base method
-func (m *MockVolumeProvider) TeardownVolume(volume registry.Volume, brickAllocations []registry.BrickAllocation) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "TeardownVolume", volume, brickAllocations)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// TeardownVolume indicates an expected call of TeardownVolume
-func (mr *MockVolumeProviderMockRecorder) TeardownVolume(volume, brickAllocations interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TeardownVolume", reflect.TypeOf((*MockVolumeProvider)(nil).TeardownVolume), volume, brickAllocations)
-}
-
-// CopyDataIn mocks base method
-func (m *MockVolumeProvider) CopyDataIn(volume registry.Volume) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "CopyDataIn", volume)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// CopyDataIn indicates an expected call of CopyDataIn
-func (mr *MockVolumeProviderMockRecorder) CopyDataIn(volume interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyDataIn", reflect.TypeOf((*MockVolumeProvider)(nil).CopyDataIn), volume)
-}
-
-// CopyDataOut mocks base method
-func (m *MockVolumeProvider) CopyDataOut(volume registry.Volume) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "CopyDataOut", volume)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// CopyDataOut indicates an expected call of CopyDataOut
-func (mr *MockVolumeProviderMockRecorder) CopyDataOut(volume interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyDataOut", reflect.TypeOf((*MockVolumeProvider)(nil).CopyDataOut), volume)
-}
-
-// MockMounter is a mock of Mounter interface
-type MockMounter struct {
- ctrl *gomock.Controller
- recorder *MockMounterMockRecorder
-}
-
-// MockMounterMockRecorder is the mock recorder for MockMounter
-type MockMounterMockRecorder struct {
- mock *MockMounter
-}
-
-// NewMockMounter creates a new mock instance
-func NewMockMounter(ctrl *gomock.Controller) *MockMounter {
- mock := &MockMounter{ctrl: ctrl}
- mock.recorder = &MockMounterMockRecorder{mock}
- return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use
-func (m *MockMounter) EXPECT() *MockMounterMockRecorder {
- return m.recorder
-}
-
-// Mount mocks base method
-func (m *MockMounter) Mount(volume registry.Volume, brickAllocations []registry.BrickAllocation, attachments []registry.Attachment) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Mount", volume, brickAllocations, attachments)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// Mount indicates an expected call of Mount
-func (mr *MockMounterMockRecorder) Mount(volume, brickAllocations, attachments interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Mount", reflect.TypeOf((*MockMounter)(nil).Mount), volume, brickAllocations, attachments)
-}
-
-// Unmount mocks base method
-func (m *MockMounter) Unmount(volume registry.Volume, brickAllocations []registry.BrickAllocation, attachments []registry.Attachment) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Unmount", volume, brickAllocations, attachments)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// Unmount indicates an expected call of Unmount
-func (mr *MockMounterMockRecorder) Unmount(volume, brickAllocations, attachments interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unmount", reflect.TypeOf((*MockMounter)(nil).Unmount), volume, brickAllocations, attachments)
-}
diff --git a/internal/pkg/mocks/pool_mock.go b/internal/pkg/mocks/pool_mock.go
deleted file mode 100644
index f01738e6..00000000
--- a/internal/pkg/mocks/pool_mock.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Code generated by MockGen. DO NOT EDIT.
-// Source: internal/pkg/registry/pool.go
-
-// Package mocks is a generated GoMock package.
-package mocks
-
-import (
- context "context"
- registry "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- gomock "github.com/golang/mock/gomock"
- reflect "reflect"
-)
-
-// MockPoolRegistry is a mock of PoolRegistry interface
-type MockPoolRegistry struct {
- ctrl *gomock.Controller
- recorder *MockPoolRegistryMockRecorder
-}
-
-// MockPoolRegistryMockRecorder is the mock recorder for MockPoolRegistry
-type MockPoolRegistryMockRecorder struct {
- mock *MockPoolRegistry
-}
-
-// NewMockPoolRegistry creates a new mock instance
-func NewMockPoolRegistry(ctrl *gomock.Controller) *MockPoolRegistry {
- mock := &MockPoolRegistry{ctrl: ctrl}
- mock.recorder = &MockPoolRegistryMockRecorder{mock}
- return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use
-func (m *MockPoolRegistry) EXPECT() *MockPoolRegistryMockRecorder {
- return m.recorder
-}
-
-// Pools mocks base method
-func (m *MockPoolRegistry) Pools() ([]registry.Pool, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Pools")
- ret0, _ := ret[0].([]registry.Pool)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// Pools indicates an expected call of Pools
-func (mr *MockPoolRegistryMockRecorder) Pools() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Pools", reflect.TypeOf((*MockPoolRegistry)(nil).Pools))
-}
-
-// UpdateHost mocks base method
-func (m *MockPoolRegistry) UpdateHost(bricks []registry.BrickInfo) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "UpdateHost", bricks)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// UpdateHost indicates an expected call of UpdateHost
-func (mr *MockPoolRegistryMockRecorder) UpdateHost(bricks interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateHost", reflect.TypeOf((*MockPoolRegistry)(nil).UpdateHost), bricks)
-}
-
-// KeepAliveHost mocks base method
-func (m *MockPoolRegistry) KeepAliveHost(hostname string) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "KeepAliveHost", hostname)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// KeepAliveHost indicates an expected call of KeepAliveHost
-func (mr *MockPoolRegistryMockRecorder) KeepAliveHost(hostname interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeepAliveHost", reflect.TypeOf((*MockPoolRegistry)(nil).KeepAliveHost), hostname)
-}
-
-// AllocateBricksForVolume mocks base method
-func (m *MockPoolRegistry) AllocateBricksForVolume(volume registry.Volume) ([]registry.BrickAllocation, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "AllocateBricksForVolume", volume)
- ret0, _ := ret[0].([]registry.BrickAllocation)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// AllocateBricksForVolume indicates an expected call of AllocateBricksForVolume
-func (mr *MockPoolRegistryMockRecorder) AllocateBricksForVolume(volume interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllocateBricksForVolume", reflect.TypeOf((*MockPoolRegistry)(nil).AllocateBricksForVolume), volume)
-}
-
-// DeallocateBricks mocks base method
-func (m *MockPoolRegistry) DeallocateBricks(volume registry.VolumeName) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "DeallocateBricks", volume)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// DeallocateBricks indicates an expected call of DeallocateBricks
-func (mr *MockPoolRegistryMockRecorder) DeallocateBricks(volume interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeallocateBricks", reflect.TypeOf((*MockPoolRegistry)(nil).DeallocateBricks), volume)
-}
-
-// HardDeleteAllocations mocks base method
-func (m *MockPoolRegistry) HardDeleteAllocations(allocations []registry.BrickAllocation) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "HardDeleteAllocations", allocations)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// HardDeleteAllocations indicates an expected call of HardDeleteAllocations
-func (mr *MockPoolRegistryMockRecorder) HardDeleteAllocations(allocations interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HardDeleteAllocations", reflect.TypeOf((*MockPoolRegistry)(nil).HardDeleteAllocations), allocations)
-}
-
-// GetAllocationsForHost mocks base method
-func (m *MockPoolRegistry) GetAllocationsForHost(hostname string) ([]registry.BrickAllocation, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetAllocationsForHost", hostname)
- ret0, _ := ret[0].([]registry.BrickAllocation)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// GetAllocationsForHost indicates an expected call of GetAllocationsForHost
-func (mr *MockPoolRegistryMockRecorder) GetAllocationsForHost(hostname interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllocationsForHost", reflect.TypeOf((*MockPoolRegistry)(nil).GetAllocationsForHost), hostname)
-}
-
-// GetAllocationsForVolume mocks base method
-func (m *MockPoolRegistry) GetAllocationsForVolume(volume registry.VolumeName) ([]registry.BrickAllocation, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetAllocationsForVolume", volume)
- ret0, _ := ret[0].([]registry.BrickAllocation)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// GetAllocationsForVolume indicates an expected call of GetAllocationsForVolume
-func (mr *MockPoolRegistryMockRecorder) GetAllocationsForVolume(volume interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllocationsForVolume", reflect.TypeOf((*MockPoolRegistry)(nil).GetAllocationsForVolume), volume)
-}
-
-// GetBrickInfo mocks base method
-func (m *MockPoolRegistry) GetBrickInfo(hostname, device string) (registry.BrickInfo, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetBrickInfo", hostname, device)
- ret0, _ := ret[0].(registry.BrickInfo)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// GetBrickInfo indicates an expected call of GetBrickInfo
-func (mr *MockPoolRegistryMockRecorder) GetBrickInfo(hostname, device interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBrickInfo", reflect.TypeOf((*MockPoolRegistry)(nil).GetBrickInfo), hostname, device)
-}
-
-// GetNewHostBrickAllocations mocks base method
-func (m *MockPoolRegistry) GetNewHostBrickAllocations(ctxt context.Context, hostname string) <-chan registry.BrickAllocation {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetNewHostBrickAllocations", ctxt, hostname)
- ret0, _ := ret[0].(<-chan registry.BrickAllocation)
- return ret0
-}
-
-// GetNewHostBrickAllocations indicates an expected call of GetNewHostBrickAllocations
-func (mr *MockPoolRegistryMockRecorder) GetNewHostBrickAllocations(ctxt, hostname interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNewHostBrickAllocations", reflect.TypeOf((*MockPoolRegistry)(nil).GetNewHostBrickAllocations), ctxt, hostname)
-}
diff --git a/internal/pkg/mocks/volume_mock.go b/internal/pkg/mocks/volume_mock.go
deleted file mode 100644
index 49ffb6bb..00000000
--- a/internal/pkg/mocks/volume_mock.go
+++ /dev/null
@@ -1,315 +0,0 @@
-// Code generated by MockGen. DO NOT EDIT.
-// Source: internal/pkg/registry/volume.go
-
-// Package mocks is a generated GoMock package.
-package mocks
-
-import (
- context "context"
- registry "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- gomock "github.com/golang/mock/gomock"
- reflect "reflect"
-)
-
-// MockVolumeRegistry is a mock of VolumeRegistry interface
-type MockVolumeRegistry struct {
- ctrl *gomock.Controller
- recorder *MockVolumeRegistryMockRecorder
-}
-
-// MockVolumeRegistryMockRecorder is the mock recorder for MockVolumeRegistry
-type MockVolumeRegistryMockRecorder struct {
- mock *MockVolumeRegistry
-}
-
-// NewMockVolumeRegistry creates a new mock instance
-func NewMockVolumeRegistry(ctrl *gomock.Controller) *MockVolumeRegistry {
- mock := &MockVolumeRegistry{ctrl: ctrl}
- mock.recorder = &MockVolumeRegistryMockRecorder{mock}
- return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use
-func (m *MockVolumeRegistry) EXPECT() *MockVolumeRegistryMockRecorder {
- return m.recorder
-}
-
-// Jobs mocks base method
-func (m *MockVolumeRegistry) Jobs() ([]registry.Job, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Jobs")
- ret0, _ := ret[0].([]registry.Job)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// Jobs indicates an expected call of Jobs
-func (mr *MockVolumeRegistryMockRecorder) Jobs() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Jobs", reflect.TypeOf((*MockVolumeRegistry)(nil).Jobs))
-}
-
-// Job mocks base method
-func (m *MockVolumeRegistry) Job(jobName string) (registry.Job, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Job", jobName)
- ret0, _ := ret[0].(registry.Job)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// Job indicates an expected call of Job
-func (mr *MockVolumeRegistryMockRecorder) Job(jobName interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Job", reflect.TypeOf((*MockVolumeRegistry)(nil).Job), jobName)
-}
-
-// AddJob mocks base method
-func (m *MockVolumeRegistry) AddJob(job registry.Job) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "AddJob", job)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// AddJob indicates an expected call of AddJob
-func (mr *MockVolumeRegistryMockRecorder) AddJob(job interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddJob", reflect.TypeOf((*MockVolumeRegistry)(nil).AddJob), job)
-}
-
-// JobAttachHosts mocks base method
-func (m *MockVolumeRegistry) JobAttachHosts(jobName string, hosts []string) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "JobAttachHosts", jobName, hosts)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// JobAttachHosts indicates an expected call of JobAttachHosts
-func (mr *MockVolumeRegistryMockRecorder) JobAttachHosts(jobName, hosts interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "JobAttachHosts", reflect.TypeOf((*MockVolumeRegistry)(nil).JobAttachHosts), jobName, hosts)
-}
-
-// DeleteJob mocks base method
-func (m *MockVolumeRegistry) DeleteJob(jobName string) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "DeleteJob", jobName)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// DeleteJob indicates an expected call of DeleteJob
-func (mr *MockVolumeRegistryMockRecorder) DeleteJob(jobName interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteJob", reflect.TypeOf((*MockVolumeRegistry)(nil).DeleteJob), jobName)
-}
-
-// AddVolume mocks base method
-func (m *MockVolumeRegistry) AddVolume(volume registry.Volume) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "AddVolume", volume)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// AddVolume indicates an expected call of AddVolume
-func (mr *MockVolumeRegistryMockRecorder) AddVolume(volume interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddVolume", reflect.TypeOf((*MockVolumeRegistry)(nil).AddVolume), volume)
-}
-
-// Volume mocks base method
-func (m *MockVolumeRegistry) Volume(name registry.VolumeName) (registry.Volume, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Volume", name)
- ret0, _ := ret[0].(registry.Volume)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// Volume indicates an expected call of Volume
-func (mr *MockVolumeRegistryMockRecorder) Volume(name interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Volume", reflect.TypeOf((*MockVolumeRegistry)(nil).Volume), name)
-}
-
-// AllVolumes mocks base method
-func (m *MockVolumeRegistry) AllVolumes() ([]registry.Volume, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "AllVolumes")
- ret0, _ := ret[0].([]registry.Volume)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// AllVolumes indicates an expected call of AllVolumes
-func (mr *MockVolumeRegistryMockRecorder) AllVolumes() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllVolumes", reflect.TypeOf((*MockVolumeRegistry)(nil).AllVolumes))
-}
-
-// DeleteVolume mocks base method
-func (m *MockVolumeRegistry) DeleteVolume(name registry.VolumeName) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "DeleteVolume", name)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// DeleteVolume indicates an expected call of DeleteVolume
-func (mr *MockVolumeRegistryMockRecorder) DeleteVolume(name interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteVolume", reflect.TypeOf((*MockVolumeRegistry)(nil).DeleteVolume), name)
-}
-
-// UpdateState mocks base method
-func (m *MockVolumeRegistry) UpdateState(name registry.VolumeName, state registry.VolumeState) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "UpdateState", name, state)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// UpdateState indicates an expected call of UpdateState
-func (mr *MockVolumeRegistryMockRecorder) UpdateState(name, state interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateState", reflect.TypeOf((*MockVolumeRegistry)(nil).UpdateState), name, state)
-}
-
-// UpdateVolumeAttachments mocks base method
-func (m *MockVolumeRegistry) UpdateVolumeAttachments(name registry.VolumeName, attachments []registry.Attachment) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "UpdateVolumeAttachments", name, attachments)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// UpdateVolumeAttachments indicates an expected call of UpdateVolumeAttachments
-func (mr *MockVolumeRegistryMockRecorder) UpdateVolumeAttachments(name, attachments interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateVolumeAttachments", reflect.TypeOf((*MockVolumeRegistry)(nil).UpdateVolumeAttachments), name, attachments)
-}
-
-// DeleteVolumeAttachments mocks base method
-func (m *MockVolumeRegistry) DeleteVolumeAttachments(name registry.VolumeName, hostnames []string, jobName string) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "DeleteVolumeAttachments", name, hostnames, jobName)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// DeleteVolumeAttachments indicates an expected call of DeleteVolumeAttachments
-func (mr *MockVolumeRegistryMockRecorder) DeleteVolumeAttachments(name, hostnames, jobName interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteVolumeAttachments", reflect.TypeOf((*MockVolumeRegistry)(nil).DeleteVolumeAttachments), name, hostnames, jobName)
-}
-
-// WaitForState mocks base method
-func (m *MockVolumeRegistry) WaitForState(name registry.VolumeName, state registry.VolumeState) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "WaitForState", name, state)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// WaitForState indicates an expected call of WaitForState
-func (mr *MockVolumeRegistryMockRecorder) WaitForState(name, state interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForState", reflect.TypeOf((*MockVolumeRegistry)(nil).WaitForState), name, state)
-}
-
-// WaitForCondition mocks base method
-func (m *MockVolumeRegistry) WaitForCondition(volumeName registry.VolumeName, condition func(*registry.VolumeChange) bool) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "WaitForCondition", volumeName, condition)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// WaitForCondition indicates an expected call of WaitForCondition
-func (mr *MockVolumeRegistryMockRecorder) WaitForCondition(volumeName, condition interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForCondition", reflect.TypeOf((*MockVolumeRegistry)(nil).WaitForCondition), volumeName, condition)
-}
-
-// GetVolumeChanges mocks base method
-func (m *MockVolumeRegistry) GetVolumeChanges(ctx context.Context, volume registry.Volume) registry.VolumeChangeChan {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetVolumeChanges", ctx, volume)
- ret0, _ := ret[0].(registry.VolumeChangeChan)
- return ret0
-}
-
-// GetVolumeChanges indicates an expected call of GetVolumeChanges
-func (mr *MockVolumeRegistryMockRecorder) GetVolumeChanges(ctx, volume interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVolumeChanges", reflect.TypeOf((*MockVolumeRegistry)(nil).GetVolumeChanges), ctx, volume)
-}
-
-// VolumeOperationMutex mocks base method
-func (m *MockVolumeRegistry) VolumeOperationMutex(volumeName registry.VolumeName) (registry.Mutex, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "VolumeOperationMutex", volumeName)
- ret0, _ := ret[0].(registry.Mutex)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// VolumeOperationMutex indicates an expected call of VolumeOperationMutex
-func (mr *MockVolumeRegistryMockRecorder) VolumeOperationMutex(volumeName interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VolumeOperationMutex", reflect.TypeOf((*MockVolumeRegistry)(nil).VolumeOperationMutex), volumeName)
-}
-
-// MockMutex is a mock of Mutex interface
-type MockMutex struct {
- ctrl *gomock.Controller
- recorder *MockMutexMockRecorder
-}
-
-// MockMutexMockRecorder is the mock recorder for MockMutex
-type MockMutexMockRecorder struct {
- mock *MockMutex
-}
-
-// NewMockMutex creates a new mock instance
-func NewMockMutex(ctrl *gomock.Controller) *MockMutex {
- mock := &MockMutex{ctrl: ctrl}
- mock.recorder = &MockMutexMockRecorder{mock}
- return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use
-func (m *MockMutex) EXPECT() *MockMutexMockRecorder {
- return m.recorder
-}
-
-// Lock mocks base method
-func (m *MockMutex) Lock(ctx context.Context) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Lock", ctx)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// Lock indicates an expected call of Lock
-func (mr *MockMutexMockRecorder) Lock(ctx interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Lock", reflect.TypeOf((*MockMutex)(nil).Lock), ctx)
-}
-
-// Unlock mocks base method
-func (m *MockMutex) Unlock(ctx context.Context) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Unlock", ctx)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// Unlock indicates an expected call of Unlock
-func (mr *MockMutexMockRecorder) Unlock(ctx interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unlock", reflect.TypeOf((*MockMutex)(nil).Unlock), ctx)
-}
diff --git a/internal/pkg/pfsprovider/ansible/ansible.go b/internal/pkg/pfsprovider/ansible/ansible.go
deleted file mode 100644
index 27fcb359..00000000
--- a/internal/pkg/pfsprovider/ansible/ansible.go
+++ /dev/null
@@ -1,311 +0,0 @@
-package ansible
-
-import (
- "bytes"
- "fmt"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "gopkg.in/yaml.v2"
- "io/ioutil"
- "log"
- "os"
- "os/exec"
- "path"
- "path/filepath"
- "strconv"
- "strings"
- "time"
-)
-
-type HostInfo struct {
- MGS string `yaml:"mgs,omitempty"`
- MDTS map[string]int `yaml:"mdts,omitempty,flow"`
- OSTS map[string]int `yaml:"osts,omitempty,flow"`
-}
-
-type FSInfo struct {
- Hosts map[string]HostInfo `yaml:"hosts"`
- Vars map[string]string `yaml:"vars"`
-}
-
-type FileSystems struct {
- Children map[string]FSInfo `yaml:"children"`
-}
-
-type Wrapper struct {
- All FileSystems
-}
-
-var DefaultHostGroup = "dac-prod"
-var DefaultMaxMDTs uint = 24
-
-func getInventory(fsType FSType, volume registry.Volume, brickAllocations []registry.BrickAllocation) string {
- // NOTE: only used by lustre
- mgsDevice := os.Getenv("DAC_MGS_DEV")
- if mgsDevice == "" {
- mgsDevice = "sdb"
- }
- maxMDTs := DefaultMaxMDTs
- maxMDTsConf, err := strconv.ParseUint(os.Getenv("DAC_MAX_MDT_COUNT"), 10, 32)
- if err == nil && maxMDTsConf > 0 {
- maxMDTs = uint(maxMDTsConf)
- }
-
- allocationsByHost := make(map[string][]registry.BrickAllocation)
- for _, allocation := range brickAllocations {
- allocationsByHost[allocation.Hostname] = append(allocationsByHost[allocation.Hostname], allocation)
- }
-
- // If we have more brick allocations than maxMDTs,
- // assign at most one MDT per host.
- // While this may give us fewer MDTs than maxMDTs,
- // it helps spread MDTs across network connections.
- oneMdtPerHost := len(brickAllocations) > int(maxMDTs)
-
- hosts := make(map[string]HostInfo)
- mgsnode := ""
- for host, allocations := range allocationsByHost {
- osts := make(map[string]int)
- for _, allocation := range allocations {
- osts[allocation.Device] = int(allocation.AllocatedIndex)
- }
-
- mdts := make(map[string]int)
- if oneMdtPerHost {
- allocation := allocations[0]
- mdts[allocation.Device] = int(allocation.AllocatedIndex)
- } else {
- for _, allocation := range allocations {
- mdts[allocation.Device] = int(allocation.AllocatedIndex)
- }
- }
-
- hostInfo := HostInfo{MDTS: mdts, OSTS: osts}
-
- if allocations[0].AllocatedIndex == 0 {
- if fsType == Lustre {
- hostInfo.MGS = mgsDevice
- } else {
- hostInfo.MGS = allocations[0].Device
- }
- mgsnode = host
- }
- hosts[host] = hostInfo
- }
-
- // for beegfs, mount clients via ansible
- if fsType == BeegFS {
- // TODO: this can't work now, as we need to also pass the job name
- for _, attachment := range volume.Attachments {
- hosts[attachment.Hostname] = HostInfo{}
- }
- }
-
- fsinfo := FSInfo{
- Vars: map[string]string{
- "mgsnode": mgsnode,
- "client_port": fmt.Sprintf("%d", volume.ClientPort),
- "lnet_suffix": getLnetSuffix(),
- "mdt_size": fmt.Sprintf("%dm", getMdtSizeMB()),
- },
- Hosts: hosts,
- }
- fsname := fmt.Sprintf("%s", volume.UUID)
- data := Wrapper{All: FileSystems{Children: map[string]FSInfo{fsname: fsinfo}}}
-
- output, err := yaml.Marshal(data)
- if err != nil {
- log.Fatalln(err)
- }
- strOut := string(output)
- strOut = strings.Replace(strOut, " mgs:", fmt.Sprintf(" %s_mgs:", fsname), -1)
- strOut = strings.Replace(strOut, " mdts:", fmt.Sprintf(" %s_mdts:", fsname), -1)
- strOut = strings.Replace(strOut, " osts:", fmt.Sprintf(" %s_osts:", fsname), -1)
- strOut = strings.Replace(strOut, " mgsnode:", fmt.Sprintf(" %s_mgsnode:", fsname), -1)
- strOut = strings.Replace(strOut, " client_port:", fmt.Sprintf(" %s_client_port:", fsname), -1)
- strOut = strings.Replace(strOut, " mdt_size:", fmt.Sprintf(" %s_mdt_size:", fsname), -1)
-
- hostGroup := os.Getenv("DAC_HOST_GROUP")
- if hostGroup == "" {
- hostGroup = DefaultHostGroup
- }
- strOut = strings.Replace(strOut, "all:", hostGroup+":", -1)
- return strOut
-}
-
-func getPlaybook(fsType FSType, volume registry.Volume) string {
- role := "lustre"
- if fsType == BeegFS {
- role = "beegfs"
- }
- return fmt.Sprintf(`---
-- name: Setup FS
- hosts: %s
- any_errors_fatal: true
- become: yes
- roles:
- - role: %s
- vars:
- fs_name: %s`, volume.UUID, role, volume.UUID)
-}
-
-func getAnsibleDir(suffix string) string {
- ansibleDir := os.Getenv("DAC_ANSIBLE_DIR")
- if ansibleDir == "" {
- ansibleDir = "/var/lib/data-acc/fs-ansible/"
- }
- return path.Join(ansibleDir, suffix)
-}
-
-func setupAnsible(fsType FSType, volume registry.Volume, brickAllocations []registry.BrickAllocation) (string, error) {
- dir, err := ioutil.TempDir("", fmt.Sprintf("fs%s_", volume.Name))
- if err != nil {
- return dir, err
- }
- log.Println("Using ansible tempdir:", dir)
-
- playbook := getPlaybook(fsType, volume)
- tmpPlaybook := filepath.Join(dir, "dac.yml")
- if err := ioutil.WriteFile(tmpPlaybook, bytes.NewBufferString(playbook).Bytes(), 0666); err != nil {
- return dir, err
- }
- log.Println(playbook)
-
- inventory := getInventory(fsType, volume, brickAllocations)
- tmpInventory := filepath.Join(dir, "inventory")
- if err := ioutil.WriteFile(tmpInventory, bytes.NewBufferString(inventory).Bytes(), 0666); err != nil {
- return dir, err
- }
- log.Println(inventory)
-
- cmd := exec.Command("cp", "-r", getAnsibleDir("roles"), dir)
- output, err := cmd.CombinedOutput()
- log.Println("copy roles", string(output))
- if err != nil {
- return dir, err
- }
- cmd = exec.Command("cp", "-r", getAnsibleDir(".venv"), dir)
- output, err = cmd.CombinedOutput()
- log.Println("copy venv", string(output))
- if err != nil {
- return dir, err
- }
- cmd = exec.Command("cp", "-r", getAnsibleDir("group_vars"), dir)
- output, err = cmd.CombinedOutput()
- log.Println("copy group vars", string(output))
- return dir, err
-}
-
-func executeAnsibleSetup(fsType FSType, volume registry.Volume, brickAllocations []registry.BrickAllocation) error {
- dir, err := setupAnsible(fsType, volume, brickAllocations)
- if err != nil {
- return err
- }
-
- formatArgs := "dac.yml -i inventory --tag format"
- err = executeAnsiblePlaybook(dir, formatArgs)
- if err != nil {
- return err
- }
-
- startupArgs := "dac.yml -i inventory --tag mount,create_mdt,create_mgs,create_osts,client_mount"
- err = executeAnsiblePlaybook(dir, startupArgs)
- if err != nil {
- return err
- }
-
- // only delete if everything worked, to aid debugging
- os.RemoveAll(dir)
- return nil
-}
-
-func executeAnsibleTeardown(fsType FSType, volume registry.Volume, brickAllocations []registry.BrickAllocation) error {
- dir, err := setupAnsible(fsType, volume, brickAllocations)
- if err != nil {
- return err
- }
-
- stopArgs := "dac.yml -i inventory --tag stop_all,unmount,client_unmount"
- err = executeAnsiblePlaybook(dir, stopArgs)
- if err != nil {
- return err
- }
-
- formatArgs := "dac.yml -i inventory --tag clean"
- err = executeAnsiblePlaybook(dir, formatArgs)
- if err != nil {
- return err
- }
-
- // only delete if everything worked, to aid debugging
- os.RemoveAll(dir)
- return nil
-}
-
-func executeAnsibleMount(fsType FSType, volume registry.Volume, brickAllocations []registry.BrickAllocation) error {
- dir, err := setupAnsible(fsType, volume, brickAllocations)
- if err != nil {
- return err
- }
-
- startupArgs := "dac.yml -i inventory --tag client_mount"
- err = executeAnsiblePlaybook(dir, startupArgs)
- if err != nil {
- return err
- }
-
- os.RemoveAll(dir)
- return nil
-}
-
-func executeAnsibleUnmount(fsType FSType, volume registry.Volume, brickAllocations []registry.BrickAllocation) error {
- dir, err := setupAnsible(fsType, volume, brickAllocations)
- if err != nil {
- return err
- }
-
- stopArgs := "dac.yml -i inventory --tag client_unmount"
- err = executeAnsiblePlaybook(dir, stopArgs)
- if err != nil {
- return err
- }
-
- os.RemoveAll(dir)
- return nil
-}
-
-func executeAnsiblePlaybook(dir string, args string) error {
- // TODO: downgrade debug log!
- cmdStr := fmt.Sprintf(`cd %s; . .venv/bin/activate; ansible-playbook %s;`, dir, args)
- log.Println("Requested ansible:", cmdStr)
-
- skipAnsible := os.Getenv("DAC_SKIP_ANSIBLE")
- if skipAnsible == "True" {
- log.Println("Skip as DAC_SKIP_ANSIBLE=True")
- time.Sleep(time.Millisecond * 200)
- return nil
- }
-
- var err error
- for i := 1; i <= 3; i++ {
- log.Println("Attempt", i, "of ansible:", cmdStr)
- cmd := exec.Command("bash", "-c", cmdStr)
-
- timer := time.AfterFunc(time.Minute*5, func() {
- log.Println("Time up, waited more than 5 mins to complete.")
- cmd.Process.Kill()
- })
- output, currentErr := cmd.CombinedOutput()
- timer.Stop()
-
- if currentErr == nil {
- log.Println("Completed ansible run:", cmdStr)
- log.Println(string(output))
- return nil
- } else {
- log.Println("Error in ansible run:", string(output))
- err = currentErr
- time.Sleep(time.Second * 2)
- }
- }
- return err
-}
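
The deleted executeAnsiblePlaybook above combines two robustness measures:
every attempt runs under a kill timer, and failed attempts are retried a
fixed number of times with a short pause in between. A standalone sketch of
that pattern, with an illustrative helper name and limits:

package main

import (
	"log"
	"os/exec"
	"time"
)

// runWithRetries runs cmdStr under bash, kills any attempt that outlives
// timeout, and retries up to attempts times before giving up.
func runWithRetries(cmdStr string, attempts int, timeout time.Duration) error {
	var err error
	for i := 1; i <= attempts; i++ {
		log.Println("attempt", i, "of:", cmdStr)
		cmd := exec.Command("bash", "-c", cmdStr)

		// Arm a timer so a hung run cannot block forever.
		timer := time.AfterFunc(timeout, func() {
			log.Println("time up, killing process")
			cmd.Process.Kill()
		})
		output, currentErr := cmd.CombinedOutput()
		timer.Stop()

		if currentErr == nil {
			log.Println(string(output))
			return nil
		}
		err = currentErr
		time.Sleep(2 * time.Second) // brief pause before the next attempt
	}
	return err
}

func main() {
	if err := runWithRetries("echo hello", 3, 5*time.Minute); err != nil {
		log.Fatal(err)
	}
}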
diff --git a/internal/pkg/pfsprovider/ansible/ansible_test.go b/internal/pkg/pfsprovider/ansible/ansible_test.go
deleted file mode 100644
index 9d81d2dd..00000000
--- a/internal/pkg/pfsprovider/ansible/ansible_test.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package ansible
-
-import (
- "fmt"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "github.com/stretchr/testify/assert"
- "testing"
-)
-
-func TestPlugin_GetInventory(t *testing.T) {
- volume := registry.Volume{
- Name: "1", UUID: "abcdefgh", ClientPort: 10002,
- Attachments: []registry.Attachment{
- {Hostname: "cpu1"},
- {Hostname: "cpu2"},
- },
- }
- brickAllocations := []registry.BrickAllocation{
- {Hostname: "dac1", Device: "nvme1n1", AllocatedIndex: 0},
- {Hostname: "dac1", Device: "nvme2n1", AllocatedIndex: 1},
- {Hostname: "dac1", Device: "nvme3n1", AllocatedIndex: 2},
- {Hostname: "dac2", Device: "nvme2n1", AllocatedIndex: 3},
- {Hostname: "dac2", Device: "nvme3n1", AllocatedIndex: 4},
- }
- result := getInventory(BeegFS, volume, brickAllocations)
- expected := `dac-prod:
- children:
- abcdefgh:
- hosts:
- cpu1: {}
- cpu2: {}
- dac1:
- abcdefgh_mgs: nvme1n1
- abcdefgh_mdts: {nvme1n1: 0, nvme2n1: 1, nvme3n1: 2}
- abcdefgh_osts: {nvme1n1: 0, nvme2n1: 1, nvme3n1: 2}
- dac2:
- abcdefgh_mdts: {nvme2n1: 3, nvme3n1: 4}
- abcdefgh_osts: {nvme2n1: 3, nvme3n1: 4}
- vars:
- abcdefgh_client_port: "10002"
- lnet_suffix: ""
- abcdefgh_mdt_size: 20480m
- abcdefgh_mgsnode: dac1
-`
- assert.Equal(t, expected, result)
-}
-
-func TestPlugin_GetInventory_withNoOstOnOneHost(t *testing.T) {
- volume := registry.Volume{Name: "1", UUID: "abcdefgh", ClientPort: 10002}
- brickAllocations := []registry.BrickAllocation{
- {Hostname: "dac1", Device: "nvme1n1", AllocatedIndex: 0},
- {Hostname: "dac2", Device: "nvme2n1", AllocatedIndex: 1},
- {Hostname: "dac2", Device: "nvme3n1", AllocatedIndex: 2},
- }
- result := getInventory(Lustre, volume, brickAllocations)
- expected := `dac-prod:
- children:
- abcdefgh:
- hosts:
- dac1:
- abcdefgh_mgs: sdb
- abcdefgh_mdts: {nvme1n1: 0}
- abcdefgh_osts: {nvme1n1: 0}
- dac2:
- abcdefgh_mdts: {nvme2n1: 1, nvme3n1: 2}
- abcdefgh_osts: {nvme2n1: 1, nvme3n1: 2}
- vars:
- abcdefgh_client_port: "10002"
- lnet_suffix: ""
- abcdefgh_mdt_size: 20480m
- abcdefgh_mgsnode: dac1
-`
- assert.Equal(t, expected, result)
-}
-
-func TestPlugin_GetPlaybook_beegfs(t *testing.T) {
- volume := registry.Volume{Name: "1", UUID: "abcdefgh"}
- result := getPlaybook(BeegFS, volume)
- assert.Equal(t, `---
-- name: Setup FS
- hosts: abcdefgh
- any_errors_fatal: true
- become: yes
- roles:
- - role: beegfs
- vars:
- fs_name: abcdefgh`, result)
-}
-
-func TestPlugin_GetPlaybook_lustre(t *testing.T) {
- volume := registry.Volume{Name: "1", UUID: "abcdefgh"}
- result := getPlaybook(Lustre, volume)
- assert.Equal(t, `---
-- name: Setup FS
- hosts: abcdefgh
- any_errors_fatal: true
- become: yes
- roles:
- - role: lustre
- vars:
- fs_name: abcdefgh`, result)
-}
-
-func TestPlugin_GetInventory_MaxMDT(t *testing.T) {
- volume := registry.Volume{
- Name: "1", UUID: "abcdefgh", ClientPort: 10002,
- Attachments: []registry.Attachment{
- {Hostname: "cpu1"},
- {Hostname: "cpu2"},
- },
- }
-
- var brickAllocations []registry.BrickAllocation
- for i := 1; i <= 26; i = i + 2 {
- brickAllocations = append(brickAllocations, registry.BrickAllocation{
- Hostname: fmt.Sprintf("dac%d", i),
- Device: "nvme1n1",
- AllocatedIndex: uint(i - 1),
- })
- brickAllocations = append(brickAllocations, registry.BrickAllocation{
- Hostname: fmt.Sprintf("dac%d", i),
- Device: "nvme2n1",
- AllocatedIndex: uint(i),
- })
- }
-
- result := getInventory(BeegFS, volume, brickAllocations)
- expected := `dac-prod:
- children:
- abcdefgh:
- hosts:
- cpu1: {}
- cpu2: {}
- dac1:
- abcdefgh_mgs: nvme1n1
- abcdefgh_mdts: {nvme1n1: 0}
- abcdefgh_osts: {nvme1n1: 0, nvme2n1: 1}
- dac3:
- abcdefgh_mdts: {nvme1n1: 2}
- abcdefgh_osts: {nvme1n1: 2, nvme2n1: 3}
- dac5:
- abcdefgh_mdts: {nvme1n1: 4}
- abcdefgh_osts: {nvme1n1: 4, nvme2n1: 5}
- dac7:
- abcdefgh_mdts: {nvme1n1: 6}
- abcdefgh_osts: {nvme1n1: 6, nvme2n1: 7}
- dac9:
- abcdefgh_mdts: {nvme1n1: 8}
- abcdefgh_osts: {nvme1n1: 8, nvme2n1: 9}
- dac11:
- abcdefgh_mdts: {nvme1n1: 10}
- abcdefgh_osts: {nvme1n1: 10, nvme2n1: 11}
- dac13:
- abcdefgh_mdts: {nvme1n1: 12}
- abcdefgh_osts: {nvme1n1: 12, nvme2n1: 13}
- dac15:
- abcdefgh_mdts: {nvme1n1: 14}
- abcdefgh_osts: {nvme1n1: 14, nvme2n1: 15}
- dac17:
- abcdefgh_mdts: {nvme1n1: 16}
- abcdefgh_osts: {nvme1n1: 16, nvme2n1: 17}
- dac19:
- abcdefgh_mdts: {nvme1n1: 18}
- abcdefgh_osts: {nvme1n1: 18, nvme2n1: 19}
- dac21:
- abcdefgh_mdts: {nvme1n1: 20}
- abcdefgh_osts: {nvme1n1: 20, nvme2n1: 21}
- dac23:
- abcdefgh_mdts: {nvme1n1: 22}
- abcdefgh_osts: {nvme1n1: 22, nvme2n1: 23}
- dac25:
- abcdefgh_mdts: {nvme1n1: 24}
- abcdefgh_osts: {nvme1n1: 24, nvme2n1: 25}
- vars:
- abcdefgh_client_port: "10002"
- lnet_suffix: ""
- abcdefgh_mdt_size: 20480m
- abcdefgh_mgsnode: dac1
-`
- assert.Equal(t, expected, result)
-}
diff --git a/internal/pkg/pfsprovider/ansible/copy.go b/internal/pkg/pfsprovider/ansible/copy.go
deleted file mode 100644
index 3fc8a84b..00000000
--- a/internal/pkg/pfsprovider/ansible/copy.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package ansible
-
-import (
- "fmt"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "log"
- "path"
- "strings"
-)
-
-func processDataCopy(volume registry.Volume, request registry.DataCopyRequest) error {
- cmd, err := generateDataCopyCmd(volume, request)
- if err != nil {
- return err
- }
- if cmd == "" {
- log.Println("No files to copy for:", volume.Name)
- return nil
- }
-
- log.Printf("Doing copy: %s", cmd)
-
- // Make sure the global dir is set up correctly
- // TODO: share this code with mount properly
- // TODO: probably all of this should be set up in fs-ansible really!!
- mountDir := fmt.Sprintf("/mnt/lustre/%s", volume.UUID)
- sharedDir := path.Join(mountDir, "/global")
- if err := mkdir("localhost", sharedDir); err != nil {
- return err
- }
- if err := fixUpOwnership("localhost", volume.Owner, volume.Group, sharedDir); err != nil {
- return err
- }
-
- // Do the copy
- return runner.Execute("localhost", cmd)
-}
-
-func generateDataCopyCmd(volume registry.Volume, request registry.DataCopyRequest) (string, error) {
- rsync, err := generateRsyncCmd(volume, request)
- if err != nil || rsync == "" {
- return "", err
- }
-
- cmd := fmt.Sprintf("sudo -g '#%d' -u '#%d' %s", volume.Group, volume.Owner, rsync)
- dacHostBufferPath := fmt.Sprintf("/mnt/lustre/%s/global", volume.UUID)
- cmd = fmt.Sprintf("bash -c \"export DW_JOB_STRIPED='%s' && %s\"", dacHostBufferPath, cmd)
- return cmd, nil
-}
-
-func generateRsyncCmd(volume registry.Volume, request registry.DataCopyRequest) (string, error) {
- if request.Source == "" && request.Destination == "" {
- return "", nil
- }
-
- var flags string
- if request.SourceType == registry.Directory {
- flags = "-r -ospgu --stats"
- } else if request.SourceType == registry.File {
- flags = "-ospgu --stats"
- } else {
- return "", fmt.Errorf("unsupported source type %s for volume: %s", request.SourceType, volume.Name)
- }
-
- return fmt.Sprintf("rsync %s %s %s", flags,
- escapePath(request.Source),
- escapePath(request.Destination)), nil
-}
-
-func escapePath(path string) string {
- return strings.Replace(path, "$DW_JOB_STRIPED", "\\$DW_JOB_STRIPED", 1)
-}
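
For concreteness: with a directory stage-in whose source is
$DW_JOB_STRIPED/src and destination /home/user/out, an owner and group of
1001, and a volume UUID of abcdefgh (all invented values), the deleted
generateDataCopyCmd composed roughly this command:

bash -c "export DW_JOB_STRIPED='/mnt/lustre/abcdefgh/global' && sudo -g '#1001' -u '#1001' rsync -r -ospgu --stats \$DW_JOB_STRIPED/src /home/user/out"

Note how escapePath turns $DW_JOB_STRIPED into \$DW_JOB_STRIPED, so the
variable survives the outer shell and is expanded by the inner bash after the
export, resolving the rsync source inside the buffer's global directory.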
diff --git a/internal/pkg/pfsprovider/ansible/mount_test.go b/internal/pkg/pfsprovider/ansible/mount_test.go
deleted file mode 100644
index 8dd45f74..00000000
--- a/internal/pkg/pfsprovider/ansible/mount_test.go
+++ /dev/null
@@ -1,273 +0,0 @@
-package ansible
-
-import (
- "errors"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "github.com/stretchr/testify/assert"
- "testing"
-)
-
-type fakeRunner struct {
- err error
- calls int
- hostnames []string
- cmdStrs []string
-}
-
-func (f *fakeRunner) Execute(hostname string, cmdStr string) error {
- f.calls += 1
- f.hostnames = append(f.hostnames, hostname)
- f.cmdStrs = append(f.cmdStrs, cmdStr)
- if cmdStr == "grep /dac/job1_job /etc/mtab" {
- return errors.New("trigger mount")
- }
- return f.err
-}
-
-func Test_mkdir(t *testing.T) {
- defer func() { runner = &run{} }()
- fake := &fakeRunner{}
- runner = fake
-
- err := mkdir("host", "dir")
- assert.Nil(t, err)
- assert.Equal(t, "host", fake.hostnames[0])
- assert.Equal(t, "mkdir -p dir", fake.cmdStrs[0])
-
- runner = &fakeRunner{err: errors.New("expected")}
- err = mkdir("", "")
- assert.Equal(t, "expected", err.Error())
-}
-
-func Test_mountLustre(t *testing.T) {
- defer func() { runner = &run{} }()
- fake := &fakeRunner{}
- runner = fake
-
- err := mountLustre("host", "-opa@o2ib1", "mgt", "fs", "/dac/job1_job")
- assert.Nil(t, err)
- assert.Equal(t, 2, fake.calls)
- assert.Equal(t, "host", fake.hostnames[0])
- assert.Equal(t, "host", fake.hostnames[1])
- assert.Equal(t, "grep /dac/job1_job /etc/mtab", fake.cmdStrs[0])
- assert.Equal(t, "mount -t lustre -o flock,nodev,nosuid mgt-opa@o2ib1:/fs /dac/job1_job", fake.cmdStrs[1])
-
- fake = &fakeRunner{err: errors.New("expected")}
- runner = fake
- err = mountRemoteFilesystem(Lustre, "host", "", "mgt", "fs", "asdf")
- assert.Equal(t, "expected", err.Error())
- assert.Equal(t, 2, fake.calls)
- assert.Equal(t, "grep asdf /etc/mtab", fake.cmdStrs[0])
- assert.Equal(t, "mount -t lustre -o flock,nodev,nosuid mgt:/fs asdf", fake.cmdStrs[1])
-}
-
-func Test_createSwap(t *testing.T) {
- defer func() { runner = &run{} }()
- fake := &fakeRunner{}
- runner = fake
-
- err := createSwap("host", 3, "file", "loopback")
- assert.Nil(t, err)
- assert.Equal(t, "host", fake.hostnames[0])
- assert.Equal(t, "host", fake.hostnames[1])
- assert.Equal(t, "host", fake.hostnames[2])
- assert.Equal(t, 4, len(fake.cmdStrs))
- assert.Equal(t, "dd if=/dev/zero of=file bs=1024 count=3072", fake.cmdStrs[0])
- assert.Equal(t, "chmod 0600 file", fake.cmdStrs[1])
- assert.Equal(t, "losetup loopback file", fake.cmdStrs[2])
- assert.Equal(t, "mkswap loopback", fake.cmdStrs[3])
-}
-
-func Test_fixUpOwnership(t *testing.T) {
- defer func() { runner = &run{} }()
- fake := &fakeRunner{}
- runner = fake
-
- err := fixUpOwnership("host", 10, 11, "dir")
- assert.Nil(t, err)
-
- assert.Equal(t, 2, fake.calls)
- assert.Equal(t, "host", fake.hostnames[0])
- assert.Equal(t, "chown 10:11 dir", fake.cmdStrs[0])
- assert.Equal(t, "host", fake.hostnames[1])
- assert.Equal(t, "chmod 770 dir", fake.cmdStrs[1])
-}
-
-func Test_Mount(t *testing.T) {
- defer func() { runner = &run{} }()
- fake := &fakeRunner{}
- runner = fake
- attachments := []registry.Attachment{
- {Hostname: "client1", Job: "job1", State: registry.RequestAttach},
- {Hostname: "client2", Job: "job1", State: registry.RequestAttach},
- {Hostname: "client3", Job: "job3", State: registry.Attached},
- {Hostname: "client3", Job: "job3", State: registry.RequestDetach},
- {Hostname: "client3", Job: "job3", State: registry.Detached},
- {Hostname: "client2", Job: "job2", State: registry.RequestAttach},
- }
- volume := registry.Volume{
- Name: "asdf", JobName: "asdf",
- AttachGlobalNamespace: true,
- AttachPrivateNamespace: true,
- AttachAsSwapBytes: 1024 * 1024, // 1 MiB
- Attachments: attachments,
- ClientPort: 42,
- Owner: 1001,
- Group: 1001,
- }
-
- assert.PanicsWithValue(t,
- "failed to find primary brick for volume: asdf",
- func() { mount(Lustre, volume, nil, nil) })
-
- bricks := []registry.BrickAllocation{
- {Hostname: "host1"},
- {Hostname: "host2"},
- }
- err := mount(Lustre, volume, bricks, attachments)
- assert.Nil(t, err)
- assert.Equal(t, 53, fake.calls)
-
- assert.Equal(t, "client1", fake.hostnames[0])
- assert.Equal(t, "mkdir -p /dac/job1_job", fake.cmdStrs[0])
- assert.Equal(t, "grep /dac/job1_job /etc/mtab", fake.cmdStrs[1])
- assert.Equal(t, "mount -t lustre -o flock,nodev,nosuid host1:/ /dac/job1_job", fake.cmdStrs[2])
-
- assert.Equal(t, "mkdir -p /dac/job1_job/swap", fake.cmdStrs[3])
- assert.Equal(t, "chown 0:0 /dac/job1_job/swap", fake.cmdStrs[4])
- assert.Equal(t, "chmod 770 /dac/job1_job/swap", fake.cmdStrs[5])
- assert.Equal(t, "dd if=/dev/zero of=/dac/job1_job/swap/client1 bs=1024 count=1024", fake.cmdStrs[6])
- assert.Equal(t, "chmod 0600 /dac/job1_job/swap/client1", fake.cmdStrs[7])
- assert.Equal(t, "losetup /dev/loop42 /dac/job1_job/swap/client1", fake.cmdStrs[8])
- assert.Equal(t, "mkswap /dev/loop42", fake.cmdStrs[9])
- assert.Equal(t, "swapon /dev/loop42", fake.cmdStrs[10])
- assert.Equal(t, "mkdir -p /dac/job1_job/private/client1", fake.cmdStrs[11])
- assert.Equal(t, "chown 1001:1001 /dac/job1_job/private/client1", fake.cmdStrs[12])
- assert.Equal(t, "chmod 770 /dac/job1_job/private/client1", fake.cmdStrs[13])
- assert.Equal(t, "ln -s /dac/job1_job/private/client1 /dac/job1_job_private", fake.cmdStrs[14])
-
- assert.Equal(t, "mkdir -p /dac/job1_job/global", fake.cmdStrs[15])
- assert.Equal(t, "chown 1001:1001 /dac/job1_job/global", fake.cmdStrs[16])
- assert.Equal(t, "chmod 770 /dac/job1_job/global", fake.cmdStrs[17])
-
- assert.Equal(t, "client2", fake.hostnames[18])
- assert.Equal(t, "mkdir -p /dac/job1_job", fake.cmdStrs[18])
-
- assert.Equal(t, "client2", fake.hostnames[36])
- assert.Equal(t, "mkdir -p /dac/job2_job", fake.cmdStrs[36])
- assert.Equal(t, "client2", fake.hostnames[52])
- assert.Equal(t, "chmod 770 /dac/job2_job/global", fake.cmdStrs[52])
-}
-
-func Test_Umount(t *testing.T) {
- defer func() { runner = &run{} }()
- fake := &fakeRunner{}
- runner = fake
- attachments := []registry.Attachment{
- {Hostname: "client1", Job: "job4", State: registry.RequestDetach},
- {Hostname: "client2", Job: "job4", State: registry.RequestDetach},
- {Hostname: "client3", Job: "job3", State: registry.Attached},
- {Hostname: "client3", Job: "job3", State: registry.RequestAttach},
- {Hostname: "client3", Job: "job3", State: registry.Detached},
- {Hostname: "client2", Job: "job1", State: registry.RequestDetach},
- }
- volume := registry.Volume{
- Name: "asdf", JobName: "asdf",
- AttachGlobalNamespace: true,
- AttachPrivateNamespace: true,
- AttachAsSwapBytes: 10000,
- Attachments: attachments,
- ClientPort: 42,
- Owner: 1001,
- Group: 1001,
- }
- bricks := []registry.BrickAllocation{
- {Hostname: "host1"},
- {Hostname: "host2"},
- }
- err := umount(Lustre, volume, bricks, attachments)
- assert.Nil(t, err)
- assert.Equal(t, 20, fake.calls)
-
- assert.Equal(t, "client1", fake.hostnames[0])
- assert.Equal(t, "swapoff /dev/loop42", fake.cmdStrs[0])
- assert.Equal(t, "losetup -d /dev/loop42", fake.cmdStrs[1])
- assert.Equal(t, "rm -rf /dac/job4_job/swap/client1", fake.cmdStrs[2])
- assert.Equal(t, "rm -rf /dac/job4_job_private", fake.cmdStrs[3])
- assert.Equal(t, "grep /dac/job4_job /etc/mtab", fake.cmdStrs[4])
- assert.Equal(t, "umount /dac/job4_job", fake.cmdStrs[5])
- assert.Equal(t, "rm -rf /dac/job4_job", fake.cmdStrs[6])
-
- assert.Equal(t, "client2", fake.hostnames[7])
- assert.Equal(t, "swapoff /dev/loop42", fake.cmdStrs[7])
-
- assert.Equal(t, "client2", fake.hostnames[19])
- assert.Equal(t, "rm -rf /dac/job1_job", fake.cmdStrs[19])
-}
-
-func Test_Umount_multi(t *testing.T) {
- defer func() { runner = &run{} }()
- fake := &fakeRunner{}
- runner = fake
- attachments := []registry.Attachment{
- {Hostname: "client1", Job: "job1", State: registry.RequestDetach},
- }
- volume := registry.Volume{
- MultiJob: true,
- Name: "asdf", JobName: "asdf",
- AttachGlobalNamespace: true,
- AttachPrivateNamespace: true,
- AttachAsSwapBytes: 10000,
- Attachments: attachments,
- ClientPort: 42,
- Owner: 1001,
- Group: 1001,
- }
- bricks := []registry.BrickAllocation{
- {Hostname: "host1"},
- {Hostname: "host2"},
- }
- err := umount(Lustre, volume, bricks, attachments)
- assert.Nil(t, err)
- assert.Equal(t, 3, fake.calls)
-
- assert.Equal(t, "client1", fake.hostnames[0])
- assert.Equal(t, "grep /dac/job1_persistent_asdf /etc/mtab", fake.cmdStrs[0])
- assert.Equal(t, "umount /dac/job1_persistent_asdf", fake.cmdStrs[1])
- assert.Equal(t, "rm -rf /dac/job1_persistent_asdf", fake.cmdStrs[2])
-}
-
-func Test_Mount_multi(t *testing.T) {
- defer func() { runner = &run{} }()
- fake := &fakeRunner{}
- runner = fake
- attachments := []registry.Attachment{
- {Hostname: "client1", Job: "job1", State: registry.RequestAttach},
- }
- volume := registry.Volume{
- MultiJob: true,
- Name: "asdf", JobName: "asdf",
- AttachGlobalNamespace: true,
- AttachPrivateNamespace: true,
- AttachAsSwapBytes: 10000,
- Attachments: attachments,
- ClientPort: 42,
- Owner: 1001,
- Group: 1001,
- UUID: "medkDfdg",
- }
- bricks := []registry.BrickAllocation{
- {Hostname: "host1"},
- {Hostname: "host2"},
- }
- err := mount(Lustre, volume, bricks, attachments)
- assert.Nil(t, err)
- assert.Equal(t, 5, fake.calls)
-
- assert.Equal(t, "client1", fake.hostnames[0])
- assert.Equal(t, "mkdir -p /dac/job1_persistent_asdf", fake.cmdStrs[0])
- assert.Equal(t, "grep /dac/job1_persistent_asdf /etc/mtab", fake.cmdStrs[1])
- assert.Equal(t, "mkdir -p /dac/job1_persistent_asdf/global", fake.cmdStrs[2])
- assert.Equal(t, "chown 1001:1001 /dac/job1_persistent_asdf/global", fake.cmdStrs[3])
- assert.Equal(t, "chmod 770 /dac/job1_persistent_asdf/global", fake.cmdStrs[4])
-}
diff --git a/internal/pkg/pfsprovider/ansible/plugin.go b/internal/pkg/pfsprovider/ansible/plugin.go
deleted file mode 100644
index a55f0678..00000000
--- a/internal/pkg/pfsprovider/ansible/plugin.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package ansible
-
-import (
- "bytes"
- "encoding/json"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/pfsprovider"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
-)
-
-func GetPlugin(fsType FSType) pfsprovider.Plugin {
- return &plugin{FSType: fsType}
-}
-
-type FSType int
-
-const (
- BeegFS FSType = iota
- Lustre
-)
-
-var fsTypeStrings = map[FSType]string{
- BeegFS: "BeegFS",
- Lustre: "Lustre",
-}
-var stringToFSType = map[string]FSType{
- "": BeegFS,
- "BeegFS": BeegFS,
- "Lustre": Lustre,
-}
-
-func (fsType FSType) String() string {
- return fsTypeStrings[fsType]
-}
-
-func (fsType FSType) MarshalJSON() ([]byte, error) {
- buffer := bytes.NewBufferString(`"`)
- buffer.WriteString(fsTypeStrings[fsType])
- buffer.WriteString(`"`)
- return buffer.Bytes(), nil
-}
-
-func (fsType *FSType) UnmarshalJSON(b []byte) error {
- var str string
- err := json.Unmarshal(b, &str)
- if err != nil {
- return err
- }
- *fsType = stringToFSType[str]
- return nil
-}
-
-type plugin struct {
- FSType FSType
-}
-
-func (plugin *plugin) Mounter() pfsprovider.Mounter {
- return &mounter{FSType: plugin.FSType}
-}
-
-func (plugin *plugin) VolumeProvider() pfsprovider.VolumeProvider {
- return &volumeProvider{FSType: plugin.FSType}
-}
-
-type volumeProvider struct {
- FSType FSType
-}
-
-func (volProvider *volumeProvider) SetupVolume(volume registry.Volume, brickAllocations []registry.BrickAllocation) error {
- return executeAnsibleSetup(volProvider.FSType, volume, brickAllocations)
-}
-
-func (volProvider *volumeProvider) TeardownVolume(volume registry.Volume, brickAllocations []registry.BrickAllocation) error {
- return executeAnsibleTeardown(volProvider.FSType, volume, brickAllocations)
-}
-
-func (*volumeProvider) CopyDataIn(volume registry.Volume) error {
- // TODO we should support multiple stagein commands! oops!
- return processDataCopy(volume, volume.StageIn)
-}
-
-func (*volumeProvider) CopyDataOut(volume registry.Volume) error {
- // TODO we should support multiple stageout commands too! oops!
- return processDataCopy(volume, volume.StageOut)
-}
-
-type mounter struct {
- FSType FSType
-}
-
-func (mounter *mounter) Mount(volume registry.Volume, brickAllocations []registry.BrickAllocation, attachments []registry.Attachment) error {
- return mount(mounter.FSType, volume, brickAllocations, attachments)
-}
-
-func (mounter *mounter) Unmount(volume registry.Volume, brickAllocations []registry.BrickAllocation, attachments []registry.Attachment) error {
- return umount(mounter.FSType, volume, brickAllocations, attachments)
-}
diff --git a/internal/pkg/pfsprovider/fake/plugin.go b/internal/pkg/pfsprovider/fake/plugin.go
deleted file mode 100644
index 1583c43c..00000000
--- a/internal/pkg/pfsprovider/fake/plugin.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package fake
-
-import (
- "github.com/RSE-Cambridge/data-acc/internal/pkg/pfsprovider"
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
- "log"
-)
-
-func GetPlugin() pfsprovider.Plugin {
- return &plugin{}
-}
-
-type plugin struct{}
-
-func (*plugin) Mounter() pfsprovider.Mounter {
- return &mounter{}
-}
-
-func (*plugin) VolumeProvider() pfsprovider.VolumeProvider {
- return &volumeProvider{}
-}
-
-type volumeProvider struct{}
-
-func (*volumeProvider) SetupVolume(volume registry.Volume, brickAllocations []registry.BrickAllocation) error {
- log.Println("SetupVolume for:", volume.Name)
- return nil
-}
-
-func (*volumeProvider) TeardownVolume(volume registry.Volume, brickAllocations []registry.BrickAllocation) error {
- log.Println("TeardownVolume for:", volume.Name)
- return nil
-}
-
-func (*volumeProvider) CopyDataIn(volume registry.Volume) error {
- log.Println("CopyDataIn for:", volume.Name)
- return nil
-}
-
-func (*volumeProvider) CopyDataOut(volume registry.Volume) error {
- log.Println("CopyDataOut for:", volume.Name)
- return nil
-}
-
-type mounter struct{}
-
-func (*mounter) Mount(volume registry.Volume, brickAllocations []registry.BrickAllocation, attachments []registry.Attachment) error {
- log.Println("Mount for:", volume.Name)
- return nil
-}
-
-func (*mounter) Unmount(volume registry.Volume, brickAllocations []registry.BrickAllocation, attachments []registry.Attachment) error {
- log.Println("Umount for:", volume.Name)
- return nil
-}
diff --git a/internal/pkg/pfsprovider/interface.go b/internal/pkg/pfsprovider/interface.go
deleted file mode 100644
index 4bbc85d0..00000000
--- a/internal/pkg/pfsprovider/interface.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package pfsprovider
-
-import (
- "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
-)
-
-// A plugin must provide implementations for both interfaces
-type Plugin interface {
- Mounter() Mounter
- VolumeProvider() VolumeProvider
-}
-
-// Actions on the host assigned to the primary brick
-type VolumeProvider interface {
- SetupVolume(volume registry.Volume, brickAllocations []registry.BrickAllocation) error
- TeardownVolume(volume registry.Volume, brickAllocations []registry.BrickAllocation) error
-
- CopyDataIn(volume registry.Volume) error
- CopyDataOut(volume registry.Volume) error
-}
-
-// Actions that are sent to remote hosts,
-// typically compute nodes and primary brick hosts
-type Mounter interface {
- Mount(volume registry.Volume, brickAllocations []registry.BrickAllocation, attachments []registry.Attachment) error
- Unmount(volume registry.Volume, brickAllocations []registry.BrickAllocation, attachments []registry.Attachment) error
-}
diff --git a/internal/pkg/registry/brick_allocation.go b/internal/pkg/registry/brick_allocation.go
new file mode 100644
index 00000000..13b7780c
--- /dev/null
+++ b/internal/pkg/registry/brick_allocation.go
@@ -0,0 +1,25 @@
+package registry
+
+import (
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/store"
+)
+
+type AllocationRegistry interface {
+ // Caller should acquire this mutex before calling GetAllPoolInfos and then allocating bricks
+ GetAllocationMutex() (store.Mutex, error)
+
+ // Get the given registered pool
+ GetPool(name datamodel.PoolName) (datamodel.Pool, error)
+
+ // Creates the pool if it doesn't exist
+ // Error if the granularity doesn't match an existing pool
+ EnsurePoolCreated(poolName datamodel.PoolName, granularityBytes uint) (datamodel.Pool, error)
+
+ // Get brick availability by pool
+ GetAllPoolInfos() ([]datamodel.PoolInfo, error)
+
+ // Get brick availability for one pool
+ // Bricks are only available if the corresponding host is currently alive
+ GetPoolInfo(poolName datamodel.PoolName) (datamodel.PoolInfo, error)
+}
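A minimal sketch of how a caller is expected to drive this interface: take the allocation mutex, then read pool availability. The keystore constructor usage and the "default" pool name are assumptions for illustration, not part of this change.

```go
package main

import (
	"context"
	"log"

	"github.com/RSE-Cambridge/data-acc/internal/pkg/registry_impl"
	"github.com/RSE-Cambridge/data-acc/internal/pkg/store_impl"
)

func main() {
	keystore := store_impl.NewKeystore()
	defer keystore.Close()
	allocations := registry_impl.NewAllocationRegistry(keystore)

	// Hold the allocation mutex around reading pool state, as the
	// interface comment above requires.
	mutex, err := allocations.GetAllocationMutex()
	if err != nil {
		log.Fatal(err)
	}
	if err := mutex.Lock(context.Background()); err != nil {
		log.Fatal(err)
	}
	defer mutex.Unlock(context.Background())

	// "default" is an assumed pool name for this sketch.
	poolInfo, err := allocations.GetPoolInfo("default")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("pool %s has %d available bricks",
		poolInfo.Pool.Name, len(poolInfo.AvailableBricks))
}
```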
diff --git a/internal/pkg/registry/brick_host.go b/internal/pkg/registry/brick_host.go
new file mode 100644
index 00000000..ebff7ce0
--- /dev/null
+++ b/internal/pkg/registry/brick_host.go
@@ -0,0 +1,29 @@
+package registry
+
+import (
+ "context"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+)
+
+type BrickHostRegistry interface {
+ // BrickHost updates bricks on startup
+ // This will error if we remove a brick that has an allocation
+ // for a Session that isn't in an error state
+ // This includes ensuring the pool exists and is consistent with the given brick host info
+ UpdateBrickHost(brickHostInfo datamodel.BrickHost) error
+
+ // Get all brick hosts
+ GetAllBrickHosts() ([]datamodel.BrickHost, error)
+
+ // While the process is still running this notifies others the host is up
+ //
+ // When a host is dead none of its bricks will get new volumes assigned,
+ // and no bricks will get cleaned up until the next service start.
+ // Error will be returned if the host info has not yet been written.
+ KeepAliveHost(ctxt context.Context, brickHostName datamodel.BrickHostName) error
+
+ // Check if given brick host is alive
+ //
+ // Error if brick host doesn't exist
+ IsBrickHostAlive(brickHostName datamodel.BrickHostName) (bool, error)
+}
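For orientation, a sketch of the startup sequence this interface implies for a brick host daemon; the brick host values are illustrative, real ones would come from local configuration.

```go
package main

import (
	"context"
	"log"

	"github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
	"github.com/RSE-Cambridge/data-acc/internal/pkg/registry_impl"
	"github.com/RSE-Cambridge/data-acc/internal/pkg/store_impl"
)

func main() {
	keystore := store_impl.NewKeystore()
	defer keystore.Close()
	brickHosts := registry_impl.NewBrickHostRegistry(keystore)

	// Illustrative values; UpdateBrickHost requires at least one brick,
	// and all bricks in a pool must share the same capacity.
	brickHost := datamodel.BrickHost{
		Name:    "dac1",
		Enabled: true,
		Bricks: []datamodel.Brick{
			{Device: "nvme0n1", PoolName: "default", CapacityGiB: 1400, BrickHostName: "dac1"},
		},
	}
	if err := brickHosts.UpdateBrickHost(brickHost); err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// The liveness key goes away when the process dies or ctx is
	// cancelled, which is what IsBrickHostAlive observes elsewhere.
	if err := brickHosts.KeepAliveHost(ctx, brickHost.Name); err != nil {
		log.Fatal(err)
	}
}
```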
diff --git a/internal/pkg/registry/pool.go b/internal/pkg/registry/pool.go
deleted file mode 100644
index 09acd4d8..00000000
--- a/internal/pkg/registry/pool.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package registry
-
-import (
- "context"
- "encoding/json"
- "log"
-)
-
-type PoolRegistry interface {
- // Returns a summary of the current state of all pools, including the bricks in each pool
- Pools() ([]Pool, error)
-
- // TODO: Pool(name string) (Pool, error)
-
- // Update (or add) information on what bricks are present
- //
- // Note: it is possible to have bricks from multiple pools on a single host
- // If any bricks that were previously registered have gone away,
- // they will be removed, unless there is an associated BrickAllocation which will
- // cause the update to fail and returns an error.
- // If any bricks in the same pool have a different capacity,
- // the update fails and returns an error.
- UpdateHost(bricks []BrickInfo) error
-
- // While the process is still running this notifies others the host is up
- //
- // When a host is dead none of its bricks will get new volumes assigned,
- // and no bricks will get cleaned up until the next service start.
- // Error will be returned if the host info has not yet been written.
- KeepAliveHost(hostname string) error
-
- // Update a brick with allocation information.
- //
- // No update is made and an error is returned if:
- // any brick already has an allocation,
- // or any volume a brick is being assigned to already has an allocation,
- // or if any of the volumes do not exist
- // or if there is not exactly one primary brick.
- //
- // Note: you may assign multiple volumes in a single call, but all bricks
- // for a particular volume must be set in a single call
- AllocateBricksForVolume(volume Volume) ([]BrickAllocation, error)
-
- // Deallocate all bricks associated with the given volume
- //
- // No update is made and an error is returned if any of the brick allocations don't match the current state.
- // If any host associated with one of the bricks is down, an error is returned and the deallocate is
- // recorded as requested and not executed.
- // Note: this returns as soon as deallocate is requested, doesn't wait for cleanup completion
- DeallocateBricks(volume VolumeName) error
-
- // This is called after DeallocateBricks has been processed
- HardDeleteAllocations(allocations []BrickAllocation) error
-
- // Get all the allocations for bricks associated with the specified hostname
- GetAllocationsForHost(hostname string) ([]BrickAllocation, error)
-
- // Get all the allocations for bricks associated with the specific volume
- GetAllocationsForVolume(volume VolumeName) ([]BrickAllocation, error)
-
- // Get information on a specific brick
- GetBrickInfo(hostname string, device string) (BrickInfo, error)
-
- // Returns a channel that reports all new brick allocations for given hostname
- //
- // The channel is closed when the context is cancelled or timeout.
- // Any errors in the watching log the issue and panic
- GetNewHostBrickAllocations(ctxt context.Context, hostname string) <-chan BrickAllocation
-}
-
-type Pool struct {
- // The pool is derived from all the reported bricks
- // It must only contain the characters A-Za-z0-9
- Name string // TODO: should we create PoolName type?
-
- // Returns all unallocated bricks in this pool associated with a live host
- AvailableBricks []BrickInfo
-
- // Returns all brick allocations for this pool
- AllocatedBricks []BrickAllocation
-
- // This is the allocation unit for the pool
- // It is the minimum size of any registered brick
- GranularityGB uint
-
- // List of all hosts that report bricks in this pool
- Hosts map[string]HostInfo
-}
-
-func (pool Pool) String() string {
- poolString, err := json.Marshal(pool)
- if err != nil {
- log.Fatal(err)
- }
- return string(poolString)
-}
-
-type HostInfo struct {
- // It must only contain the characters "A-Za-z0-9."
- Hostname string
-
- // True if data accelerator process is thought to be running
- Alive bool
-}
-
-type BrickInfo struct {
- // Bricks are identified by device and hostname
- // It must only contain the characters A-Za-z0-9
- Device string
-
- // It must only contain the characters "A-Za-z0-9."
- Hostname string
-
- // The pool a brick is associated with
- // It must only contain the characters A-Za-z0-9
- PoolName string
-
- // Size of the brick, defines the pool granularity
- CapacityGB uint
-}
-
-type BrickAllocation struct {
- // Bricks are identified by device and hostname
- // It must only contain the characters A-Za-z0-9
- Device string
-
- // It must only contain the characters "A-Za-z0-9."
- Hostname string
-
- // Name of the volume that owns the brick
- AllocatedVolume VolumeName
-
- // 0 index allocation is the primary brick,
- // which is responsible for provisioning the associated volume
- AllocatedIndex uint
-
- // If any allocation sent to deallocate has a host that isn't
- // alive, this flag is set rather than have allocations removed.
- // A host should check for any allocations
- DeallocateRequested bool
-}
diff --git a/internal/pkg/registry/session.go b/internal/pkg/registry/session.go
new file mode 100644
index 00000000..9bde9b0a
--- /dev/null
+++ b/internal/pkg/registry/session.go
@@ -0,0 +1,41 @@
+package registry
+
+import (
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/store"
+)
+
+// TODO: rename to instance? or filesystem? or just in the object model?
+type SessionRegistry interface {
+ // This mutex should be held before doing any operations on given session
+ //
+ // No error if the session doesn't exist, as this is also used when creating a session
+ GetSessionMutex(sessionName datamodel.SessionName) (store.Mutex, error)
+
+ // Create the provided session
+ //
+ // Error if session already exists
+ CreateSession(session datamodel.Session) (datamodel.Session, error)
+
+ // Get requested session
+ //
+ // Error if session does not exist
+ GetSession(sessionName datamodel.SessionName) (datamodel.Session, error)
+
+ // Get all sessions
+ GetAllSessions() ([]datamodel.Session, error)
+
+ // Update provided session
+ //
+ // Error if current revision does not match (i.e. caller has a stale copy of Session)
+ // Error if session does not exist
+ UpdateSession(session datamodel.Session) (datamodel.Session, error)
+
+ // This is called before confirming the Session delete request,
+ // after all bricks have been de-allocated
+ //
+ // Error if session has any allocations
+ // Error if session doesn't match current revision
+ // No error if session has already been deleted
+ DeleteSession(session datamodel.Session) error
+}
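A minimal sketch of the read-modify-write pattern these comments describe, assuming a caller that wants to flip one status flag; the function name is a hypothetical example, not part of this change.

```go
package example

import (
	"context"

	"github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
	"github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
)

// markDataInComplete is a hypothetical caller of SessionRegistry.
func markDataInComplete(sessions registry.SessionRegistry, name datamodel.SessionName) error {
	mutex, err := sessions.GetSessionMutex(name)
	if err != nil {
		return err
	}
	if err := mutex.Lock(context.Background()); err != nil {
		return err
	}
	defer mutex.Unlock(context.Background())

	session, err := sessions.GetSession(name)
	if err != nil {
		return err
	}
	session.Status.CopyDataInComplete = true
	// UpdateSession rejects the write if session.Revision is stale,
	// so a concurrent writer cannot be silently overwritten.
	_, err = sessions.UpdateSession(session)
	return err
}
```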
diff --git a/internal/pkg/registry/session_actions.go b/internal/pkg/registry/session_actions.go
new file mode 100644
index 00000000..670176fe
--- /dev/null
+++ b/internal/pkg/registry/session_actions.go
@@ -0,0 +1,28 @@
+package registry
+
+import (
+ "context"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+)
+
+type SessionActions interface {
+ // Updates session, then requests action
+ //
+ // Error if current revision of session doesn't match
+ // Error if context is cancelled or timed-out
+ SendSessionAction(
+ ctxt context.Context, actionType datamodel.SessionActionType,
+ session datamodel.Session) (<-chan datamodel.SessionAction, error)
+
+ // Gets all actions for the given host
+ GetSessionActionRequests(ctxt context.Context, brickHostName datamodel.BrickHostName) (<-chan datamodel.SessionAction, error)
+
+ // Get any actions that have not been completed
+ GetOutstandingSessionActionRequests(brickHostName datamodel.BrickHostName) ([]datamodel.SessionAction, error)
+
+ // Server reports given action is complete
+ // Includes callbacks for Create Session Volume
+ //
+ // Error if action has already completed or doesn't exist
+ CompleteSessionAction(action datamodel.SessionAction) error
+}
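To make the request/response flow concrete, a sketch of the caller side; the ten minute timeout and the function name are assumptions for illustration.

```go
package example

import (
	"context"
	"fmt"
	"time"

	"github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
	"github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
)

// createFilesystem is a hypothetical caller of SessionActions.
func createFilesystem(actions registry.SessionActions, session datamodel.Session) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()

	responses, err := actions.SendSessionAction(ctx, datamodel.SessionCreateFilesystem, session)
	if err != nil {
		return err
	}
	// A single response is delivered once the brick host side calls
	// CompleteSessionAction for this action's uuid.
	response := <-responses
	if response.Error != "" {
		return fmt.Errorf("create filesystem failed: %s", response.Error)
	}
	return nil
}
```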
diff --git a/internal/pkg/registry/volume.go b/internal/pkg/registry/volume.go
deleted file mode 100644
index 4c6fbd60..00000000
--- a/internal/pkg/registry/volume.go
+++ /dev/null
@@ -1,347 +0,0 @@
-package registry
-
-import (
- "bytes"
- "context"
- "encoding/json"
-)
-
-type VolumeRegistry interface {
- // Get all registered jobs and their volumes
- Jobs() ([]Job, error)
-
- // Get a specific job
- Job(jobName string) (Job, error)
-
- // Add job and associated volumes
- // Fails to add job if volumes are in a bad state
- AddJob(job Job) error
-
- // Update specified job with given hosts
- // Fails if the job already has any hosts associated with it
- JobAttachHosts(jobName string, hosts []string) error
-
- // Remove job from the system
- // TODO: fails if volumes are not in the deleted state?
- DeleteJob(jobName string) error
-
- // Add a new volume
- // TODO: remove add/delete and only
- AddVolume(volume Volume) error
-
- // Get information about a specific volume
- Volume(name VolumeName) (Volume, error)
-
- // Get information about all volumes
- AllVolumes() ([]Volume, error)
-
- // TODO: this should error if volume is not in correct state?
- DeleteVolume(name VolumeName) error
-
- // Move between volume states, but only one by one
- UpdateState(name VolumeName, state VolumeState) error
-
- // Update all the specified attachments
- // if attachment doesn't exist, attachment is added
- UpdateVolumeAttachments(name VolumeName, attachments []Attachment) error
-
- // Delete all the specified attachments
- DeleteVolumeAttachments(name VolumeName, hostnames []string, jobName string) error
-
- // Wait for a specific state, error returned if not possible
- WaitForState(name VolumeName, state VolumeState) error
-
- // Wait for a given condition
- //
- // Blocks until condition returns true, or returns error if things timeout
- WaitForCondition(volumeName VolumeName, condition func(event *VolumeChange) bool) error
-
- // Gets all changes that happen to the given volume
- //
- // To stop watching cancel or timeout the context, this will close the channel.
- GetVolumeChanges(ctx context.Context, volume Volume) VolumeChangeChan
-
- // Get a new mutex associated with the specified key
- VolumeOperationMutex(volumeName VolumeName) (Mutex, error)
-}
-
-type Mutex interface {
- Lock(ctx context.Context) error
- Unlock(ctx context.Context) error
-}
-
-type VolumeChangeChan <-chan VolumeChange
-
-type VolumeChange struct {
- New *Volume
- Old *Volume
- IsDelete bool
- Err error
-}
-
-// TODO: Attachment request, or session is probably a better name here...
-type Job struct {
- // Name of the job
- Name string // TODO: should we make a JobName type?
- Owner uint
- CreatedAt uint
-
- // The hosts that want to mount the storage
- // Note: to allow for copy in/out the brick hosts are assumed to have an attachment
- AttachHosts []string
-
- // If non-zero capacity requested, a volume is created for this job
- // It may be exposed to the attach hosts in a variety of ways, as defined by the volume
- JobVolume VolumeName
-
- // There may be attachments to multiple shared volumes
- MultiJobVolumes []VolumeName
-
- // Environment variables for each volume associated with the job
- Paths map[string]string
-}
-
-type VolumeName string
-
-// Volume information
-// To get assigned bricks see PoolRegistry
-type Volume struct {
- // e.g. job1 or Foo
- Name VolumeName
- // it's 8 characters long, so works nicely with lustre
- UUID string
- // True if multiple jobs can attach to this volume
- MultiJob bool
-
- // Message requested actions to primary brick host
- // TODO: move mount and data copy actions to other parts of the volume state
- State VolumeState
-
- // Requested pool of bricks for volume
- Pool string // TODO: PoolName?
- // Number of bricks requested, calculated from requested capacity
- SizeBricks uint
- // Actual size of the volume
- SizeGB uint
-
- // Back reference to what job created this volume
- JobName string
- // e.g. 1001
- Owner uint
- // If empty defaults to User
- Group uint
- // e.g. SLURM or Manila
- CreatedBy string
- // The unix (utc) timestamp of when this volume was created
- CreatedAt uint
-
- // TODO: need to fill these in...
- // They all related to how the volume is attached
-
- // All current attachments
- Attachments []Attachment
- // Attach all attachments to a shared global namespace
- // Allowed for any volume type
- AttachGlobalNamespace bool
- // Have an attachment specific namespace mounted, only for non multi job
- AttachPrivateNamespace bool
- // If not zero, swap of the requested amount mounted for each attachment
- // Not allowed for multi job
- AttachAsSwapBytes uint
- // Add attachment specific cache for each given filesystem path
- // Not allowed for multi job
- // Note: assumes the same path is cached for all attachments
- AttachPrivateCache []string
-
- // TODO: maybe data copy should be a slice associated with the job?
- // Request certain files to be staged in
- // Not currently allowed for multi job volumes
- StageIn DataCopyRequest
- // Request certain files to be staged in
- // Not currently allowed for multi job volumes
- StageOut DataCopyRequest
-
- // BeeGFS wants each fs to be assigned a unique port number
- ClientPort int
-
- // Track if we have had bricks assigned
- // if we request delete and no bricks were ever assigned, don't wait for dacd!
- HadBricksAssigned bool
-
- // TODO: data model currently does not do these things well:
- // 1. correctly track multiple jobs at the same time attach to the same persistent buffer
- // 2. data in/out requests for persistent buffer
- // 3. track amount of space used by swap and/or metadata
-}
-
-func (volume Volume) String() string {
- rawVolume, _ := json.Marshal(volume)
- return string(rawVolume)
-}
-
-type VolumeState int
-
-const (
- Unknown VolumeState = iota
- Registered
- BricksAllocated
- BricksProvisioned // setup waits for this, updated by host manager, paths should be setup, or gone to ERROR
- DataInRequested
- DataInComplete // data_in waits for host manager to data in, or gone to ERROR
- DataOutRequested
- DataOutComplete // data copied out by host manager, or gone to ERROR
- DeleteRequested VolumeState = 399
- BricksDeleted VolumeState = 400 // all bricks correctly deprovisioned unless host down or gone to ERROR
- Error VolumeState = 500
-)
-
-var volumeStateStrings = map[VolumeState]string{
- Unknown: "",
- Registered: "Registered",
- BricksAllocated: "BricksAllocated",
- BricksProvisioned: "BricksProvisioned",
- DataInRequested: "DataInRequested",
- DataInComplete: "DataInComplete",
- DataOutRequested: "DataOutRequested",
- DataOutComplete: "DataOutComplete",
- DeleteRequested: "DeleteRequested",
- BricksDeleted: "BricksDeleted",
- Error: "Error",
-}
-var stringToVolumeState = map[string]VolumeState{
- "": Unknown,
- "Registered": Registered,
- "BricksAllocated": BricksAllocated,
- "BricksProvisioned": BricksProvisioned,
- "DataInRequested": DataInRequested,
- "DataInComplete": DataInComplete,
- "DataOutRequested": DataOutRequested,
- "DataOutComplete": DataOutComplete,
- "DeleteRequested": DeleteRequested,
- "BricksDeleted": BricksDeleted,
- "Error": Error,
-}
-
-func (volumeState VolumeState) String() string {
- return volumeStateStrings[volumeState]
-}
-
-func (volumeState VolumeState) MarshalJSON() ([]byte, error) {
- buffer := bytes.NewBufferString(`"`)
- buffer.WriteString(volumeStateStrings[volumeState])
- buffer.WriteString(`"`)
- return buffer.Bytes(), nil
-}
-
-func (volumeState *VolumeState) UnmarshalJSON(b []byte) error {
- var str string
- err := json.Unmarshal(b, &str)
- if err != nil {
- return err
- }
- *volumeState = stringToVolumeState[str]
- return nil
-}
-
-type DataCopyRequest struct {
- // Source points to a File or a Directory,
- // or a file that contains a list of source and destinations,
- // with each pair on a new line
- SourceType SourceType
- // The path is either to a file or a directory or a
- Source string
- // Must be empty string for type list, otherwise specifies location
- Destination string
- // Used to notify if copy in has been requested
- // TODO: remove volume states and update this instead
- RequestCopyIn bool
- // Report if the copy has completed
- CopyCompleted bool
- // if there was problem, record it
- Error error
-}
-
-type SourceType string
-
-const (
- File SourceType = "file"
- Directory SourceType = "directory"
- // Provide a file that has source and destination file space separated pairs, each on a new line
- List SourceType = "list"
-)
-
-type Attachment struct {
- // Hostname, Job and Volume name uniquely identify an attachment
- Hostname string
-
- // Associated jobName
- Job string
-
- State AttachmentState
-
- // If any error happened, it is reported here
- Error error
-}
-
-type AttachmentState int
-
-const (
- UnknownAttachmentState AttachmentState = iota
- RequestAttach
- Attached
- RequestDetach
- Detached AttachmentState = 400 // all bricks correctly deprovisioned unless host down or gone to ERROR
- AttachmentError AttachmentState = 500
-)
-
-var attachStateStrings = map[AttachmentState]string{
- UnknownAttachmentState: "",
- RequestAttach: "RequestAttach",
- Attached: "Attached",
- RequestDetach: "RequestDetach",
- Detached: "Detached",
- AttachmentError: "AttachmentError",
-}
-var stringToAttachmentState = map[string]AttachmentState{
- "": UnknownAttachmentState,
- "RequestAttach": RequestAttach,
- "Attached": Attached,
- "RequestDetach": RequestDetach,
- "Detached": Detached,
- "AttachmentError": AttachmentError,
-}
-
-func (attachmentState AttachmentState) String() string {
- return attachStateStrings[attachmentState]
-}
-
-func (attachmentState AttachmentState) MarshalJSON() ([]byte, error) {
- buffer := bytes.NewBufferString(`"`)
- buffer.WriteString(attachStateStrings[attachmentState])
- buffer.WriteString(`"`)
- return buffer.Bytes(), nil
-}
-
-func (attachmentState *AttachmentState) UnmarshalJSON(b []byte) error {
- var str string
- err := json.Unmarshal(b, &str)
- if err != nil {
- return err
- }
- *attachmentState = stringToAttachmentState[str]
- return nil
-}
-
-func (volume Volume) FindMatchingAttachment(attachment Attachment) (*Attachment, bool) {
- return volume.FindAttachment(attachment.Hostname, attachment.Job)
-}
-
-func (volume Volume) FindAttachment(hostname string, jobName string) (*Attachment, bool) {
- for _, candidate := range volume.Attachments {
- if candidate.Hostname == hostname && candidate.Job == jobName {
- // TODO: double check for duplicate match?
- return &candidate, true
- }
- }
- return nil, false
-}
diff --git a/internal/pkg/registry_impl/brick_allocation.go b/internal/pkg/registry_impl/brick_allocation.go
new file mode 100644
index 00000000..7f501c2a
--- /dev/null
+++ b/internal/pkg/registry_impl/brick_allocation.go
@@ -0,0 +1,181 @@
+package registry_impl
+
+import (
+ "encoding/json"
+ "fmt"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/dacctl/actions_impl/parsers"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/store"
+ "log"
+)
+
+func NewAllocationRegistry(keystore store.Keystore) registry.AllocationRegistry {
+ return &allocationRegistry{
+ keystore, NewBrickHostRegistry(keystore), NewSessionRegistry(keystore),
+ }
+}
+
+type allocationRegistry struct {
+ store store.Keystore
+ brickHostRegistry registry.BrickHostRegistry
+ sessionRegistry registry.SessionRegistry
+}
+
+const poolPrefix = "/Pool/"
+const allocationLockKey = "LockAllocation"
+
+func (a *allocationRegistry) GetAllocationMutex() (store.Mutex, error) {
+ return a.store.NewMutex(allocationLockKey)
+}
+
+func getPoolKey(poolName datamodel.PoolName) string {
+ if !parsers.IsValidName(string(poolName)) {
+ log.Panicf("invalid session PrimaryBrickHost")
+ }
+ return fmt.Sprintf("%s%s", poolPrefix, poolName)
+}
+
+func (a *allocationRegistry) EnsurePoolCreated(poolName datamodel.PoolName, granularityBytes uint) (datamodel.Pool, error) {
+ if granularityBytes <= 0 {
+ log.Panicf("granularity must be greater than 0")
+ }
+ key := getPoolKey(poolName)
+ poolExists, err := a.store.IsExist(key)
+ if err != nil {
+ return datamodel.Pool{}, fmt.Errorf("unable to check if pool exists: %s", err)
+ }
+
+ if poolExists {
+ pool, err := a.GetPool(poolName)
+ if err != nil {
+ return pool, fmt.Errorf("unable to get pool due to: %s", err)
+ }
+ if pool.GranularityBytes != granularityBytes {
+ return pool, fmt.Errorf("granularity doesn't match existing pool: %d", pool.GranularityBytes)
+ }
+ return pool, nil
+ }
+
+ // TODO: need an admin tool to delete a "bad" pool
+ // create the pool
+ pool := datamodel.Pool{Name: poolName, GranularityBytes: granularityBytes}
+ value, err := json.Marshal(pool)
+ if err != nil {
+ log.Panicf("failed to convert pool to json: %s", err)
+ }
+ _, err = a.store.Create(key, value)
+ return pool, err
+}
+
+func (a *allocationRegistry) GetPool(poolName datamodel.PoolName) (datamodel.Pool, error) {
+ key := getPoolKey(poolName)
+ keyValueVersion, err := a.store.Get(key)
+ pool := datamodel.Pool{}
+ if err != nil {
+ return pool, fmt.Errorf("unable to get pool due to: %s", err)
+ }
+
+ err = json.Unmarshal(keyValueVersion.Value, &pool)
+ if err != nil {
+ log.Panicf("unable to parse pool")
+ }
+ return pool, nil
+}
+
+func (a *allocationRegistry) getAllPools() (map[datamodel.PoolName]datamodel.Pool, error) {
+ allKeyValues, err := a.store.GetAll(poolPrefix)
+ if err != nil {
+ return nil, fmt.Errorf("unable to get pools due to: %s", err)
+ }
+ pools := make(map[datamodel.PoolName]datamodel.Pool)
+ for _, keyValueVersion := range allKeyValues {
+ pool := datamodel.Pool{}
+ err = json.Unmarshal(keyValueVersion.Value, &pool)
+ if err != nil {
+ log.Panicf("unable to parse pool")
+ }
+ pools[pool.Name] = pool
+ }
+ return pools, nil
+}
+
+func (a *allocationRegistry) GetAllPoolInfos() ([]datamodel.PoolInfo, error) {
+ pools, err := a.getAllPools()
+ if err != nil {
+ return nil, fmt.Errorf("unable to get pools due to: %s", err)
+ }
+ sessions, err := a.sessionRegistry.GetAllSessions()
+ if err != nil {
+ return nil, fmt.Errorf("unable to get all sessions due to: %s", err)
+ }
+ brickHosts, err := a.brickHostRegistry.GetAllBrickHosts()
+ if err != nil {
+ return nil, fmt.Errorf("unable to get all briks due to: %s", err)
+ }
+
+ var allPoolInfos []datamodel.PoolInfo
+
+ for _, pool := range pools {
+ poolInfo := datamodel.PoolInfo{Pool: pool}
+
+ allocatedDevicesByBrickHost := make(map[datamodel.BrickHostName][]string)
+ for _, session := range sessions {
+ for i, brick := range session.AllocatedBricks {
+ if brick.PoolName == pool.Name {
+ poolInfo.AllocatedBricks = append(poolInfo.AllocatedBricks, datamodel.BrickAllocation{
+ Session: session.Name,
+ Brick: brick,
+ AllocatedIndex: uint(i),
+ })
+ allocatedDevicesByBrickHost[brick.BrickHostName] = append(allocatedDevicesByBrickHost[brick.BrickHostName], brick.Device)
+ }
+ }
+ }
+
+ for _, brickHost := range brickHosts {
+ // skip disabled hosts
+ if !brickHost.Enabled {
+ continue
+ }
+ // skip dead hosts
+ hostAlive, _ := a.brickHostRegistry.IsBrickHostAlive(brickHost.Name)
+ if !hostAlive {
+ continue
+ }
+
+ // look for any unallocated bricks
+ for _, brick := range brickHost.Bricks {
+ allocated := false
+ for _, allocatedDevice := range allocatedDevicesByBrickHost[brick.BrickHostName] {
+ if allocatedDevice == brick.Device {
+ if allocated {
+ log.Panicf("detected duplicated brick allocation: %+v", brick)
+ }
+ allocated = true
+ }
+ }
+ if !allocated {
+ poolInfo.AvailableBricks = append(poolInfo.AvailableBricks, brick)
+ }
+ }
+ }
+
+ allPoolInfos = append(allPoolInfos, poolInfo)
+ }
+ return allPoolInfos, nil
+}
+
+func (a *allocationRegistry) GetPoolInfo(poolName datamodel.PoolName) (datamodel.PoolInfo, error) {
+ allInfo, err := a.GetAllPoolInfos()
+ if err != nil {
+ return datamodel.PoolInfo{}, err
+ }
+
+ for _, poolInfo := range allInfo {
+ if poolInfo.Pool.Name == poolName {
+ return poolInfo, nil
+ }
+ }
+ return datamodel.PoolInfo{}, fmt.Errorf("unable to find pool %s", poolName)
+}
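Since AvailableBricks is filtered above to enabled, live hosts, the free capacity of a pool falls out directly from the brick count and the pool granularity. A small illustrative helper, not part of this change:

```go
package example

import "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"

// freeBytes is a hypothetical helper: each available brick contributes
// exactly one granularity unit, because the pool granularity is the
// (uniform) brick size.
func freeBytes(info datamodel.PoolInfo) uint {
	return uint(len(info.AvailableBricks)) * info.Pool.GranularityBytes
}
```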
diff --git a/internal/pkg/registry_impl/brick_host.go b/internal/pkg/registry_impl/brick_host.go
new file mode 100644
index 00000000..baf44ec7
--- /dev/null
+++ b/internal/pkg/registry_impl/brick_host.go
@@ -0,0 +1,104 @@
+package registry_impl
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/dacctl/actions_impl/parsers"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/store"
+ "log"
+)
+
+func NewBrickHostRegistry(keystore store.Keystore) registry.BrickHostRegistry {
+ return &brickHostRegistry{keystore}
+}
+
+type brickHostRegistry struct {
+ store store.Keystore
+}
+
+const brickHostPrefix = "/BrickHostStore/"
+const keepAlivePrefix = "/BrickHostAlive/"
+
+func (b *brickHostRegistry) UpdateBrickHost(brickHostInfo datamodel.BrickHost) error {
+ // find out granularity for each reported pool
+ if len(brickHostInfo.Bricks) == 0 {
+ log.Panicf("brick host must have some bricks: %s", brickHostInfo.Name)
+ }
+ poolGranularityGiBMap := make(map[datamodel.PoolName]uint)
+ for _, brick := range brickHostInfo.Bricks {
+ if !parsers.IsValidName(string(brick.PoolName)) {
+ log.Panicf("invalid pool name: %+v", brick)
+ }
+ poolGranularity, ok := poolGranularityGiBMap[brick.PoolName]
+ if !ok {
+ if brick.CapacityGiB <= 0 {
+ log.Panicf("invalid brick size: %+v", brick)
+ }
+ poolGranularityGiBMap[brick.PoolName] = brick.CapacityGiB
+ } else {
+ if brick.CapacityGiB != poolGranularity {
+ log.Panicf("inconsistent brick size: %+v", brick)
+ }
+ }
+ }
+
+ // TODO: odd dependencies here!!
+ allocationRegistry := NewAllocationRegistry(b.store)
+
+ // Check existing pools match what this brick host is reporting
+ for poolName, granularityGiB := range poolGranularityGiBMap {
+ _, err := allocationRegistry.EnsurePoolCreated(poolName, parsers.GetBytes(granularityGiB, "GiB"))
+ if err != nil {
+ return fmt.Errorf("unable to create pool due to: %s", err)
+ }
+ }
+
+ if !parsers.IsValidName(string(brickHostInfo.Name)) {
+ log.Panicf("invalid brick host name: %s", brickHostInfo.Name)
+ }
+ key := fmt.Sprintf("%s%s", brickHostPrefix, brickHostInfo.Name)
+ value, err := json.Marshal(brickHostInfo)
+ if err != nil {
+ log.Panicf("unable to covert brick host to json: %s", brickHostInfo.Name)
+ }
+
+ // Always overwrite any pre-existing key
+ _, err = b.store.Update(key, value, 0)
+ return err
+}
+
+func (b *brickHostRegistry) GetAllBrickHosts() ([]datamodel.BrickHost, error) {
+ allKeyValues, err := b.store.GetAll(brickHostPrefix)
+ if err != nil {
+ return nil, fmt.Errorf("unable to get all bricks hosts due to: %s", err)
+ }
+
+ var allBrickHosts []datamodel.BrickHost
+ for _, keyValueVersion := range allKeyValues {
+ brickHost := datamodel.BrickHost{}
+ err := json.Unmarshal(keyValueVersion.Value, &brickHost)
+ if err != nil {
+ log.Panicf("unable to parse brick host due to: %s", err)
+ }
+ allBrickHosts = append(allBrickHosts, brickHost)
+ }
+ return allBrickHosts, nil
+}
+
+func getKeepAliveKey(brickHostName datamodel.BrickHostName) string {
+ if !parsers.IsValidName(string(brickHostName)) {
+ log.Panicf("invalid brick host name: %s", brickHostName)
+ }
+ return fmt.Sprintf("%s%s", keepAlivePrefix, brickHostName)
+}
+
+func (b *brickHostRegistry) KeepAliveHost(ctxt context.Context, brickHostName datamodel.BrickHostName) error {
+ return b.store.KeepAliveKey(ctxt, getKeepAliveKey(brickHostName))
+}
+
+func (b *brickHostRegistry) IsBrickHostAlive(brickHostName datamodel.BrickHostName) (bool, error) {
+ return b.store.IsExist(getKeepAliveKey(brickHostName))
+}
diff --git a/internal/pkg/registry_impl/session.go b/internal/pkg/registry_impl/session.go
new file mode 100644
index 00000000..d7e7cc63
--- /dev/null
+++ b/internal/pkg/registry_impl/session.go
@@ -0,0 +1,119 @@
+package registry_impl
+
+import (
+ "encoding/json"
+ "fmt"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/dacctl/actions_impl/parsers"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/store"
+ "log"
+)
+
+func NewSessionRegistry(store store.Keystore) registry.SessionRegistry {
+ return &sessionRegistry{store}
+}
+
+type sessionRegistry struct {
+ store store.Keystore
+}
+
+func (s *sessionRegistry) GetSessionMutex(sessionName datamodel.SessionName) (store.Mutex, error) {
+ sessionKey := getSessionKey(sessionName)
+ return s.store.NewMutex(sessionKey)
+}
+
+const sessionPrefix = "/session/"
+
+func getSessionKey(sessionName datamodel.SessionName) string {
+ if !parsers.IsValidName(string(sessionName)) {
+ log.Panicf("invalid session name: '%s'", sessionName)
+ }
+ return fmt.Sprintf("%s%s", sessionPrefix, sessionName)
+}
+
+func (s *sessionRegistry) CreateSession(session datamodel.Session) (datamodel.Session, error) {
+ sessionKey := getSessionKey(session.Name)
+
+ // TODO: more validation?
+ if session.PrimaryBrickHost == "" {
+ log.Panicf("PrimaryBrickHost must be set before creating session: %s", session.Name)
+ }
+ if session.ActualSizeBytes > 0 {
+ if len(session.AllocatedBricks) == 0 {
+ log.Panicf("session must have allocations before being created: %s", session.Name)
+ }
+ } else {
+ if len(session.AllocatedBricks) != 0 {
+ log.Panicf("allocations out of sync with ActualSizeBytes: %s", session.Name)
+ }
+ // TODO: ensure not allocated to any other session?
+ }
+
+ createRevision, err := s.store.Create(sessionKey, sessionToRaw(session))
+ if err != nil {
+ return session, fmt.Errorf("unable to create session due to: %s", err)
+ }
+
+ // Return the last modification revision
+ session.Revision = createRevision
+ return session, nil
+}
+
+func (s *sessionRegistry) GetSession(sessionName datamodel.SessionName) (datamodel.Session, error) {
+ keyValueVersion, err := s.store.Get(getSessionKey(sessionName))
+ if err != nil {
+ return datamodel.Session{}, fmt.Errorf("unable to get session due to: %s", err)
+ }
+
+ session := sessionFromRaw(keyValueVersion.Value)
+ session.Revision = keyValueVersion.ModRevision
+ return session, nil
+}
+
+func (s *sessionRegistry) GetAllSessions() ([]datamodel.Session, error) {
+ results, err := s.store.GetAll(sessionPrefix)
+ if err != nil {
+ return nil, fmt.Errorf("unable to get all sessions due to: %s", err.Error())
+ }
+
+ var sessions []datamodel.Session
+ for _, keyValueVersion := range results {
+ session := sessionFromRaw(keyValueVersion.Value)
+ session.Revision = keyValueVersion.ModRevision
+ sessions = append(sessions, session)
+ }
+
+ return sessions, nil
+}
+
+func (s *sessionRegistry) UpdateSession(session datamodel.Session) (datamodel.Session, error) {
+ newRevision, err := s.store.Update(getSessionKey(session.Name), sessionToRaw(session), session.Revision)
+ if err != nil {
+ return session, fmt.Errorf("unable to update session due to: %s", err.Error())
+ }
+
+ session.Revision = newRevision
+ return session, nil
+}
+
+func (s *sessionRegistry) DeleteSession(session datamodel.Session) error {
+ return s.store.Delete(getSessionKey(session.Name), session.Revision)
+}
+
+func sessionToRaw(session datamodel.Session) []byte {
+ rawSession, err := json.Marshal(session)
+ if err != nil {
+ log.Panicf("unable to convert session to json due to: %s", err.Error())
+ }
+ return rawSession
+}
+
+func sessionFromRaw(raw []byte) datamodel.Session {
+ session := datamodel.Session{}
+ err := json.Unmarshal(raw, &session)
+ if err != nil {
+ log.Panicf("unable parse session from store due to: %s", err)
+ }
+ return session
+}
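Because UpdateSession surfaces revision conflicts as plain errors, a caller racing without the session mutex can re-read and retry. A bounded retry sketch, with mutate as a hypothetical callback; a real caller should distinguish conflicts from other failures before retrying.

```go
package example

import (
	"fmt"

	"github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
	"github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
)

// updateWithRetry re-reads the session and retries the conditional write.
func updateWithRetry(sessions registry.SessionRegistry, name datamodel.SessionName,
	mutate func(*datamodel.Session)) error {
	for attempt := 0; attempt < 3; attempt++ {
		session, err := sessions.GetSession(name)
		if err != nil {
			return err
		}
		mutate(&session)
		if _, err := sessions.UpdateSession(session); err == nil {
			return nil
		}
	}
	return fmt.Errorf("giving up updating session %s after repeated conflicts", name)
}
```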
diff --git a/internal/pkg/registry_impl/session_actions.go b/internal/pkg/registry_impl/session_actions.go
new file mode 100644
index 00000000..87bf354d
--- /dev/null
+++ b/internal/pkg/registry_impl/session_actions.go
@@ -0,0 +1,199 @@
+package registry_impl
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/dacctl/actions_impl/parsers"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/store"
+ "github.com/google/uuid"
+ "log"
+ "sort"
+)
+
+func NewSessionActionsRegistry(store store.Keystore) registry.SessionActions {
+ return &sessionActions{store, NewBrickHostRegistry(store)}
+}
+
+type sessionActions struct {
+ store store.Keystore
+ brickHostRegistry registry.BrickHostRegistry
+}
+
+const sessionActionRequestPrefix = "/session_action/request/"
+
+func getSessionActionRequestHostPrefix(brickHost datamodel.BrickHostName) string {
+ if !parsers.IsValidName(string(brickHost)) {
+ log.Panicf("invalid session PrimaryBrickHost")
+ }
+ return fmt.Sprintf("%s%s/", sessionActionRequestPrefix, brickHost)
+}
+
+func getSessionActionRequestKey(action datamodel.SessionAction) string {
+ hostPrefix := getSessionActionRequestHostPrefix(action.Session.PrimaryBrickHost)
+ if !parsers.IsValidName(action.Uuid) {
+ log.Panicf("invalid session action uuid")
+ }
+ return fmt.Sprintf("%s%s", hostPrefix, action.Uuid)
+}
+
+const sessionActionResponsePrefix = "/session_action/response/"
+
+func getSessionActionResponseKey(action datamodel.SessionAction) string {
+ if !parsers.IsValidName(string(action.Session.Name)) {
+ log.Panicf("invalid session PrimaryBrickHost")
+ }
+ if !parsers.IsValidName(action.Uuid) {
+ log.Panicf("invalid session action uuid %s", action.Uuid)
+ }
+ return fmt.Sprintf("%s%s/%s", sessionActionResponsePrefix, action.Session.Name, action.Uuid)
+}
+
+func sessionActionToRaw(session datamodel.SessionAction) []byte {
+ rawSession, err := json.Marshal(session)
+ if err != nil {
+ log.Panicf("unable to convert session action to json due to: %s", err.Error())
+ }
+ return rawSession
+}
+
+func sessionActionFromRaw(raw []byte) datamodel.SessionAction {
+ session := datamodel.SessionAction{}
+ err := json.Unmarshal(raw, &session)
+ if err != nil {
+ log.Panicf("unable parse session action from store due to: %s", err)
+ }
+ return session
+}
+
+func (s *sessionActions) SendSessionAction(
+ ctxt context.Context, actionType datamodel.SessionActionType,
+ session datamodel.Session) (<-chan datamodel.SessionAction, error) {
+
+ if session.PrimaryBrickHost == "" {
+ panic("sessions must have a primary brick host set")
+ }
+ sessionAction := datamodel.SessionAction{
+ Session: session,
+ ActionType: actionType,
+ Uuid: uuid.New().String(),
+ }
+
+ isAlive, err := s.brickHostRegistry.IsBrickHostAlive(session.PrimaryBrickHost)
+ if err != nil {
+ return nil, fmt.Errorf("unable to check host status: %s", session.PrimaryBrickHost)
+ }
+ if !isAlive {
+ return nil, fmt.Errorf("can't send as primary brick host not alive: %s", session.PrimaryBrickHost)
+ }
+
+ responseKey := getSessionActionResponseKey(sessionAction)
+ callbackKeyUpdates := s.store.Watch(ctxt, responseKey, false)
+
+ requestKey := getSessionActionRequestKey(sessionAction)
+ if _, err := s.store.Create(requestKey, sessionActionToRaw(sessionAction)); err != nil {
+ return nil, fmt.Errorf("unable to send session action due to: %s", err)
+ }
+
+ responseChan := make(chan datamodel.SessionAction)
+
+ go func() {
+ log.Printf("started waiting for action response %+v\n", sessionAction)
+ for update := range callbackKeyUpdates {
+ if !update.IsCreate || update.New.Value == nil {
+ log.Panicf("only expected to see the action response key being created")
+ }
+
+ responseSessionAction := sessionActionFromRaw(update.New.Value)
+ log.Printf("found action response %+v\n", responseSessionAction)
+
+ responseChan <- responseSessionAction
+
+ // delete the response now that it has been delivered, but only if it was not an error response
+ if responseSessionAction.Error == "" {
+ if count, err := s.store.DeleteAllKeysWithPrefix(responseKey); err != nil || count != 1 {
+ log.Panicf("failed to clean up response key: %s", responseKey)
+ }
+ }
+
+ log.Printf("completed waiting for action response %+v\n", sessionAction)
+ close(responseChan)
+ return
+ }
+ log.Println("stopped waiting for action response, likely the context timed out")
+ // TODO: double check watch gets stopped somehow? assume context has been cancelled externally?
+ }()
+ return responseChan, nil
+}
+
+func (s *sessionActions) GetSessionActionRequests(ctxt context.Context,
+ brickHostName datamodel.BrickHostName) (<-chan datamodel.SessionAction, error) {
+ requestHostPrefix := getSessionActionRequestHostPrefix(brickHostName)
+
+ // TODO: how do we check for any pending actions that exist before we start watching?
+ // or do we only care about pending deletes, and we let them just timeout?
+ requestUpdates := s.store.Watch(ctxt, requestHostPrefix, true)
+
+ sessionActionChan := make(chan datamodel.SessionAction)
+ go func() {
+ log.Printf("Starting watching for SessionActionRequests for %s\n", brickHostName)
+ for update := range requestUpdates {
+ if update.IsDelete {
+ log.Printf("Seen SessionActionRequest deleted for %s\n", brickHostName)
+ continue
+ }
+ if !update.IsCreate || update.New.Value == nil {
+ log.Panicf("don't expect to see updates of session action request key")
+ }
+ log.Printf("Seen SessionActionRequest created for %s\n", brickHostName)
+
+ sessionAction := sessionActionFromRaw(update.New.Value)
+ sessionActionChan <- sessionAction
+ }
+ log.Printf("Stopped watching for SessionActionRequests for %s\n", brickHostName)
+ close(sessionActionChan)
+ }()
+ return sessionActionChan, nil
+}
+
+func (s *sessionActions) GetOutstandingSessionActionRequests(brickHostName datamodel.BrickHostName) ([]datamodel.SessionAction, error) {
+ rawRequests, err := s.store.GetAll(getSessionActionRequestHostPrefix(brickHostName))
+ if err != nil {
+ return nil, err
+ }
+ // Return actions in order they were sent, i.e. create revision order
+ sort.Slice(rawRequests, func(i, j int) bool {
+ return rawRequests[i].CreateRevision < rawRequests[j].CreateRevision
+ })
+ var actions []datamodel.SessionAction
+ for _, request := range rawRequests {
+ actions = append(actions, sessionActionFromRaw(request.Value))
+ }
+ return actions, nil
+}
+
+func (s *sessionActions) CompleteSessionAction(sessionAction datamodel.SessionAction) error {
+ // TODO: when you delete a session, you should delete all completion records?
+
+ // Tell caller we are done by writing this key
+ responseKey := getSessionActionResponseKey(sessionAction)
+ _, err := s.store.Create(responseKey, sessionActionToRaw(sessionAction))
+ if err != nil {
+ return fmt.Errorf("unable to create response message due to: %s", err)
+ }
+
+ // Delete the request now that it has been processed
+ requestKey := getSessionActionRequestKey(sessionAction)
+ count, err := s.store.DeleteAllKeysWithPrefix(requestKey)
+ if err != nil {
+ return fmt.Errorf("unable to delete stale request message due to: %s", err)
+ }
+ if count != 1 {
+ return fmt.Errorf("unable to delete stale request message due to: %s", err)
+ }
+
+ log.Printf("Completed session action %s for session %s\n", sessionAction.Uuid, sessionAction.Session.Name)
+ return nil
+}
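The other half of this protocol is the consumer loop on each brick host: drain request keys, do the work, then write the response key via CompleteSessionAction. A sketch, where handle is a hypothetical dispatcher on ActionType:

```go
package example

import (
	"context"
	"log"

	"github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
	"github.com/RSE-Cambridge/data-acc/internal/pkg/registry"
)

// serveActions processes actions for one brick host until ctx ends.
func serveActions(ctx context.Context, actions registry.SessionActions,
	host datamodel.BrickHostName, handle func(datamodel.SessionAction) error) error {
	requests, err := actions.GetSessionActionRequests(ctx, host)
	if err != nil {
		return err
	}
	for action := range requests {
		if err := handle(action); err != nil {
			// Errors travel back in the response body; this matches the
			// Error field checked on the SendSessionAction side.
			action.Error = err.Error()
		}
		if err := actions.CompleteSessionAction(action); err != nil {
			log.Printf("failed to complete action %s: %s", action.Uuid, err)
		}
	}
	return nil
}
```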
diff --git a/internal/pkg/registry_impl/session_actions_test.go b/internal/pkg/registry_impl/session_actions_test.go
new file mode 100644
index 00000000..305e7034
--- /dev/null
+++ b/internal/pkg/registry_impl/session_actions_test.go
@@ -0,0 +1,31 @@
+package registry_impl
+
+import (
+ "context"
+ "errors"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/mock_registry"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/mock_store"
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestSessionActions_SendSessionAction(t *testing.T) {
+ // TODO: need way more testing here
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ brickHost := mock_registry.NewMockBrickHostRegistry(mockCtrl)
+ keystore := mock_store.NewMockKeystore(mockCtrl)
+ actions := sessionActions{brickHostRegistry: brickHost, store: keystore}
+ session := datamodel.Session{Name: "foo", PrimaryBrickHost: "host1"}
+ brickHost.EXPECT().IsBrickHostAlive(session.PrimaryBrickHost).Return(true, nil)
+ keystore.EXPECT().Watch(context.TODO(), gomock.Any(), false).Return(nil)
+ fakeErr := errors.New("fake")
+ keystore.EXPECT().Create(gomock.Any(), gomock.Any()).Return(int64(3), fakeErr)
+
+ channel, err := actions.SendSessionAction(context.TODO(), datamodel.SessionCreateFilesystem, session)
+
+ assert.Nil(t, channel)
+ assert.Equal(t, "unable to send session action due to: fake", err.Error())
+}
diff --git a/internal/pkg/registry_impl/session_test.go b/internal/pkg/registry_impl/session_test.go
new file mode 100644
index 00000000..33abdfc2
--- /dev/null
+++ b/internal/pkg/registry_impl/session_test.go
@@ -0,0 +1,171 @@
+package registry_impl
+
+import (
+ "encoding/json"
+ "errors"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/datamodel"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/mock_store"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/store"
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+var exampleSessionString = []byte(`{"Name":"foo","Revision":0,"Owner":0,"Group":0,"CreatedAt":0,"VolumeRequest":{"MultiJob":false,"Caller":"","TotalCapacityBytes":0,"PoolName":"","Access":0,"Type":0,"SwapBytes":0},"Status":{"Error":"","FileSystemCreated":false,"CopyDataInComplete":false,"CopyDataOutComplete":false,"DeleteRequested":false,"DeleteSkipCopyDataOut":false,"UnmountComplete":false,"MountComplete":false},"StageInRequests":null,"StageOutRequests":null,"MultiJobAttachments":null,"Paths":null,"ActualSizeBytes":0,"AllocatedBricks":null,"PrimaryBrickHost":"host1","RequestedAttachHosts":null,"FilesystemStatus":{"Error":"","InternalName":"","InternalData":""},"CurrentAttachments":null}`)
+var exampleSession = datamodel.Session{Name: "foo", PrimaryBrickHost: "host1"}
+
+func TestExampleString(t *testing.T) {
+ exampleStr, err := json.Marshal(exampleSession)
+ assert.Nil(t, err)
+ assert.Equal(t, string(exampleSessionString), string(exampleStr))
+
+ var unmarshalSession datamodel.Session
+ err = json.Unmarshal(exampleStr, &unmarshalSession)
+ assert.Nil(t, err)
+ assert.Equal(t, unmarshalSession, exampleSession)
+
+ sessionWithError := datamodel.Session{
+ Name: "foo", PrimaryBrickHost: "host1",
+ Status: datamodel.SessionStatus{Error: "fake_error"},
+ }
+ sessionWithErrorStr, err := json.Marshal(sessionWithError)
+ assert.Nil(t, err)
+ assert.Contains(t, string(sessionWithErrorStr), "fake_error")
+}
+
+func TestSessionRegistry_GetSessionMutex(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ keystore := mock_store.NewMockKeystore(mockCtrl)
+ registry := NewSessionRegistry(keystore)
+ fakeErr := errors.New("fake")
+ keystore.EXPECT().NewMutex("/session/foo").Return(nil, fakeErr)
+
+ mutex, err := registry.GetSessionMutex("foo")
+ assert.Nil(t, mutex)
+ assert.Equal(t, fakeErr, err)
+
+ assert.PanicsWithValue(t, "invalid session name: 'foo/bar'", func() {
+ registry.GetSessionMutex("foo/bar")
+ })
+}
+
+func TestSessionRegistry_CreateSession(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ keystore := mock_store.NewMockKeystore(mockCtrl)
+ registry := NewSessionRegistry(keystore)
+ keystore.EXPECT().Create("/session/foo", exampleSessionString).Return(int64(42), nil)
+
+ session, err := registry.CreateSession(exampleSession)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(42), session.Revision)
+
+ assert.PanicsWithValue(t, "invalid session name: 'foo/bar'", func() {
+ registry.CreateSession(datamodel.Session{Name: "foo/bar", PrimaryBrickHost: "host1"})
+ })
+ assert.PanicsWithValue(t, "session must have allocations before being created: foo", func() {
+ registry.CreateSession(datamodel.Session{Name: "foo", ActualSizeBytes: 1024, PrimaryBrickHost: "host1"})
+ })
+ assert.PanicsWithValue(t, "allocations out of sync with ActualSizeBytes: foo", func() {
+ registry.CreateSession(datamodel.Session{
+ Name: "foo",
+ AllocatedBricks: []datamodel.Brick{{}},
+ PrimaryBrickHost: "host1",
+ })
+ })
+ assert.PanicsWithValue(t, "PrimaryBrickHost must be set before creating session: foo", func() {
+ registry.CreateSession(datamodel.Session{Name: "foo", PrimaryBrickHost: ""})
+ })
+}
+
+func TestSessionRegistry_GetSession(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ keystore := mock_store.NewMockKeystore(mockCtrl)
+ registry := NewSessionRegistry(keystore)
+ keystore.EXPECT().Get("/session/foo").Return(store.KeyValueVersion{
+ ModRevision: 42,
+ Value: exampleSessionString,
+ }, nil)
+
+ session, err := registry.GetSession("foo")
+
+ assert.Nil(t, err)
+ assert.Equal(t, datamodel.Session{Name: "foo", Revision: 42, PrimaryBrickHost: "host1"}, session)
+
+ assert.PanicsWithValue(t, "invalid session name: 'foo/bar'", func() {
+ registry.GetSession("foo/bar")
+ })
+
+ fakeErr := errors.New("fake")
+ keystore.EXPECT().Get("/session/foo").Return(store.KeyValueVersion{}, fakeErr)
+ session, err = registry.GetSession("foo")
+ assert.NotNil(t, err)
+ assert.Equal(t, "unable to get session due to: fake", err.Error())
+
+ keystore.EXPECT().Get("/session/foo").Return(store.KeyValueVersion{}, nil)
+ assert.PanicsWithValue(t,
+ "unable parse session from store due to: unexpected end of JSON input",
+ func() {
+ registry.GetSession("foo")
+ })
+}
+
+func TestSessionRegistry_GetAllSessions(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ keystore := mock_store.NewMockKeystore(mockCtrl)
+ registry := NewSessionRegistry(keystore)
+ keystore.EXPECT().GetAll("/session/").Return([]store.KeyValueVersion{{
+ ModRevision: 42,
+ Value: exampleSessionString,
+ }}, nil)
+
+ sessions, err := registry.GetAllSessions()
+ assert.Nil(t, err)
+ assert.Equal(t, []datamodel.Session{{Name: "foo", Revision: 42, PrimaryBrickHost: "host1"}}, sessions)
+
+ fakeErr := errors.New("fake")
+ keystore.EXPECT().GetAll("/session/").Return(nil, fakeErr)
+ sessions, err = registry.GetAllSessions()
+ assert.Nil(t, sessions)
+ assert.NotNil(t, err)
+ assert.Equal(t, "unable to get all sessions due to: fake", err.Error())
+
+ keystore.EXPECT().GetAll("/session/").Return(nil, nil)
+ sessions, err = registry.GetAllSessions()
+ assert.Nil(t, err)
+ assert.Nil(t, sessions)
+
+ keystore.EXPECT().GetAll("/session/").Return([]store.KeyValueVersion{{}}, nil)
+ assert.PanicsWithValue(t,
+ "unable parse session from store due to: unexpected end of JSON input",
+ func() { registry.GetAllSessions() })
+}
+
+func TestSessionRegistry_UpdateSession(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ keystore := mock_store.NewMockKeystore(mockCtrl)
+ registry := NewSessionRegistry(keystore)
+ keystore.EXPECT().Update("/session/foo", exampleSessionString, int64(0)).Return(int64(44), nil)
+
+ session, err := registry.UpdateSession(datamodel.Session{Name: "foo", PrimaryBrickHost: "host1", Revision: 0})
+
+ assert.Nil(t, err)
+ assert.Equal(t, datamodel.Session{Name: "foo", PrimaryBrickHost: "host1", Revision: 44}, session)
+}
+
+func TestSessionRegistry_DeleteSession(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ keystore := mock_store.NewMockKeystore(mockCtrl)
+ registry := NewSessionRegistry(keystore)
+ fakeErr := errors.New("fake")
+ keystore.EXPECT().Delete("/session/foo", int64(40)).Return(fakeErr)
+
+ err := registry.DeleteSession(datamodel.Session{Name: "foo", Revision: 40})
+
+ assert.Equal(t, fakeErr, err)
+}
diff --git a/internal/pkg/keystoreregistry/keystore.go b/internal/pkg/store/keystore.go
similarity index 64%
rename from internal/pkg/keystoreregistry/keystore.go
rename to internal/pkg/store/keystore.go
index c92c191a..fc765a1b 100644
--- a/internal/pkg/keystoreregistry/keystore.go
+++ b/internal/pkg/store/keystore.go
@@ -1,9 +1,7 @@
-package keystoreregistry
+package store
import (
"context"
- "encoding/json"
- "log"
)
type Keystore interface {
@@ -11,39 +9,40 @@ type Keystore interface {
// ... such as a connection to etcd.
Close() error
- // Removes any key starting with the given prefix.
- // An error is returned if nothing was deleted,
- // which some users may choose to safely ignore.
- CleanPrefix(prefix string) error
-
// Atomically add all the key value pairs
//
// If an error occurs no keyvalues are written.
// Error is returned if any key already exists.
- Add(keyValues []KeyValue) error
+ Create(key string, value []byte) (int64, error)
- // Update the specifed key values, atomically
+ // Update the specified key values, atomically
//
// If ModRevision is 0, it is ignored.
// Otherwise if the revisions of any key doesn't
// match the current revision of that key, the update fails.
// When update fails an error is returned and no keyValues are updated
- Update(keyValues []KeyValueVersion) error
+ Update(key string, value []byte, modRevision int64) (int64, error)
- // Delete the specifed key values, atomically
+ // Delete the specified key values, atomically
//
// Similar to update, checks ModRevision matches current key,
// ignores ModRevision if not zero.
// If any keys are not currently present, the request fails.
// Deletes no keys if an error is returned
- DeleteAll(keyValues []KeyValueVersion) error
+ Delete(key string, modRevision int64) error
+
+ // Removes all keys with given prefix
+ DeleteAllKeysWithPrefix(keyPrefix string) (int64, error)
// Get all key values for a given prefix.
- GetAll(prefix string) ([]KeyValueVersion, error)
+ GetAll(keyPrefix string) ([]KeyValueVersion, error)
- // Get all keys for a given prefix.
+ // Get given key
Get(key string) (KeyValueVersion, error)
+ // Check if a given key exists
+ IsExist(key string) (bool, error)
+
// Get a channel containing all KeyValueUpdate events
//
// Use the context to control if you watch forever, or if you choose to cancel when a key
@@ -52,7 +51,8 @@ type Keystore interface {
// Add a key, and remove it when calling process dies
// Error is returned if the key already exists
- KeepAliveKey(key string) error
+	// Can be cancelled via the supplied context
+ KeepAliveKey(ctxt context.Context, key string) error
// Get a new mutex associated with the specified key
NewMutex(lockKey string) (Mutex, error)
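+
+	// Illustrative sketch (not part of the interface): the intended
+	// optimistic-concurrency flow, where the revision returned by
+	// Create is passed back as modRevision on the next Update:
+	//
+	//   rev, err := keystore.Create("/example/key", []byte(`{"a": 1}`))
+	//   if err == nil {
+	//       rev, err = keystore.Update("/example/key", []byte(`{"a": 2}`), rev)
+	//   }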
@@ -60,14 +60,9 @@ type Keystore interface {
type KeyValueUpdateChan <-chan KeyValueUpdate
-type KeyValue struct {
- Key string
- Value string // TODO: should this be []byte? Or have a json parsed version?
-}
-
type KeyValueVersion struct {
Key string
- Value string
+ Value []byte
CreateRevision int64
ModRevision int64
}
@@ -81,18 +76,6 @@ type KeyValueUpdate struct {
Err error
}
-func (kvv KeyValueVersion) String() string {
- return toJson(kvv)
-}
-
-func toJson(message interface{}) string {
- b, error := json.Marshal(message)
- if error != nil {
- log.Fatal(error)
- }
- return string(b)
-}
-
type Mutex interface {
Lock(ctx context.Context) error
Unlock(ctx context.Context) error
diff --git a/internal/pkg/store_impl/keystore.go b/internal/pkg/store_impl/keystore.go
new file mode 100644
index 00000000..ac13b281
--- /dev/null
+++ b/internal/pkg/store_impl/keystore.go
@@ -0,0 +1,297 @@
+package store_impl
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/config"
+ "github.com/RSE-Cambridge/data-acc/internal/pkg/store"
+ "github.com/coreos/etcd/clientv3"
+ "github.com/coreos/etcd/clientv3/clientv3util"
+ "github.com/coreos/etcd/clientv3/concurrency"
+ "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+ "github.com/coreos/etcd/mvcc/mvccpb"
+ "github.com/coreos/etcd/pkg/transport"
+ "log"
+ "time"
+)
+
+func getTLSConfig(keystoreConfig config.KeystoreConfig) *tls.Config {
+ certFile := keystoreConfig.CertFile
+ keyFile := keystoreConfig.KeyFile
+ caFile := keystoreConfig.CAFile
+
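+	// mTLS is only enabled when all three paths are set; otherwise we
+	// assume a plain, unencrypted connection to etcd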
+ if certFile == "" || keyFile == "" || caFile == "" {
+ return nil
+ }
+
+ tlsInfo := transport.TLSInfo{
+ CertFile: certFile,
+ KeyFile: keyFile,
+ TrustedCAFile: caFile,
+ }
+ tlsConfig, err := tlsInfo.ClientConfig()
+ if err != nil {
+ log.Fatal(err)
+ }
+ return tlsConfig
+}
+
+func newEtcdClient() *clientv3.Client {
+ conf := config.GetKeystoreConfig(config.DefaultEnv)
+ cli, err := clientv3.New(clientv3.Config{
+ Endpoints: conf.Endpoints,
+ DialTimeout: 10 * time.Second,
+ TLS: getTLSConfig(conf),
+ })
+	if err != nil {
+		log.Fatalf("failed to create etcd client: %v", err)
+	}
+ return cli
+}
+
+func NewKeystore() store.Keystore {
+ cli := newEtcdClient()
+ return &etcKeystore{
+ Watcher: cli.Watcher,
+ KV: cli.KV,
+ Lease: cli.Lease,
+ Client: cli,
+ }
+}
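+
+// Illustrative usage sketch (wiring assumed, mirroring the tests in this
+// change): the caller owns the client lifetime:
+//
+//   keystore := NewKeystore()
+//   defer keystore.Close()
+//   sessionRegistry := NewSessionRegistry(keystore)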
+
+type etcKeystore struct {
+ Watcher clientv3.Watcher
+ KV clientv3.KV
+ Lease clientv3.Lease
+ Client *clientv3.Client
+}
+
+func (client *etcKeystore) NewMutex(lockKey string) (store.Mutex, error) {
+ session, err := concurrency.NewSession(client.Client)
+ if err != nil {
+ return nil, err
+ }
+ key := fmt.Sprintf("/locks/%s", lockKey)
+ return concurrency.NewMutex(session, key), nil
+}
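+
+// Illustrative sketch (lock name assumed): callers are expected to guard
+// multi-step updates with the mutex, bounding the wait via a context:
+//
+//   mutex, err := keystore.NewMutex("parallel_tasks")
+//   ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//   defer cancel()
+//   if err == nil && mutex.Lock(ctx) == nil {
+//       defer mutex.Unlock(context.Background())
+//       // ... read-modify-write against the store ...
+//   }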
+
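+// Note: handleError treats any etcd client error as fatal; the process
+// exits rather than returning transient failures to the caller.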
+func handleError(err error) {
+ if err != nil {
+ switch err {
+ case context.Canceled:
+ log.Fatalf("ctx is canceled by another routine: %v", err)
+ case context.DeadlineExceeded:
+			log.Fatalf("ctx deadline has been exceeded: %v", err)
+ case rpctypes.ErrEmptyKey:
+ log.Fatalf("client-side error: %v", err)
+ default:
+			log.Fatalf("unexpected etcd error (e.g. bad cluster endpoints): %v", err)
+ }
+ }
+}
+
+func (client *etcKeystore) Close() error {
+ return client.Client.Close()
+}
+
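+// runTransaction commits an etcd transaction: all thenOps are applied
+// atomically only if every ifOps comparison holds, giving the
+// compare-and-swap semantics used by Create, Update and Delete below.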
+func (client *etcKeystore) runTransaction(ifOps []clientv3.Cmp, thenOps []clientv3.Op) (int64, error) {
+ response, err := client.Client.Txn(context.Background()).If(ifOps...).Then(thenOps...).Commit()
+ handleError(err)
+
+ if !response.Succeeded {
+ log.Println(ifOps)
+ return 0, fmt.Errorf("transaction failed, as condition not met")
+ }
+ return response.Header.Revision, nil
+}
+
+func (client *etcKeystore) Create(key string, value []byte) (int64, error) {
+ var ifOps []clientv3.Cmp
+ var thenOps []clientv3.Op
+ ifOps = append(ifOps, clientv3util.KeyMissing(key))
+ thenOps = append(thenOps, clientv3.OpPut(key, string(value)))
+ revision, err := client.runTransaction(ifOps, thenOps)
+ if err != nil {
+ return 0, fmt.Errorf("unable to create key: %s due to: %s", key, err)
+ }
+ return revision, nil
+}
+
+func (client *etcKeystore) Update(key string, value []byte, modRevision int64) (int64, error) {
+
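+	// a modRevision of 0 skips both the existence and revision checks,
+	// making this put an unconditional upsert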
+ var ifOps []clientv3.Cmp
+ var thenOps []clientv3.Op
+
+ if modRevision > 0 {
+ ifOps = append(ifOps, clientv3util.KeyExists(key))
+ checkModRev := clientv3.Compare(clientv3.ModRevision(key), "=", modRevision)
+ ifOps = append(ifOps, checkModRev)
+ }
+ thenOps = append(thenOps, clientv3.OpPut(key, string(value)))
+
+ newRevision, err := client.runTransaction(ifOps, thenOps)
+ if err != nil {
+		return 0, fmt.Errorf("unable to update key: %s", err)
+ }
+ return newRevision, nil
+}
+
+func (client *etcKeystore) Delete(key string, modRevision int64) error {
+ var ifOps []clientv3.Cmp
+ var thenOps []clientv3.Op
+
+ ifOps = append(ifOps, clientv3util.KeyExists(key))
+ if modRevision > 0 {
+ checkModRev := clientv3.Compare(clientv3.ModRevision(key), "=", modRevision)
+ ifOps = append(ifOps, checkModRev)
+ }
+ thenOps = append(thenOps, clientv3.OpDelete(key))
+
+ _, err := client.runTransaction(ifOps, thenOps)
+ return err
+}
+
+func getKeyValueVersion(rawKeyValue *mvccpb.KeyValue) *store.KeyValueVersion {
+ if rawKeyValue == nil {
+ return nil
+ }
+ return &store.KeyValueVersion{
+ Key: string(rawKeyValue.Key),
+ Value: rawKeyValue.Value,
+ ModRevision: rawKeyValue.ModRevision,
+ CreateRevision: rawKeyValue.CreateRevision,
+ }
+}
+
+func (client *etcKeystore) IsExist(key string) (bool, error) {
+ response, err := client.Client.Get(context.Background(), key)
+ handleError(err)
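+	// a plain Get of a single key matches at most one kv, so Count is 0 or 1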
+ return response.Count == 1, nil
+}
+
+func (client *etcKeystore) GetAll(prefix string) ([]store.KeyValueVersion, error) {
+ response, err := client.Client.Get(context.Background(), prefix, clientv3.WithPrefix())
+ handleError(err)
+
+ var values []store.KeyValueVersion
+ for _, rawKeyValue := range response.Kvs {
+ values = append(values, *getKeyValueVersion(rawKeyValue))
+ }
+ return values, nil
+}
+
+func (client *etcKeystore) Get(key string) (store.KeyValueVersion, error) {
+ response, err := client.Client.Get(context.Background(), key)
+ handleError(err)
+
+ value := store.KeyValueVersion{}
+
+ if response.Count == 0 {
+ return value, fmt.Errorf("unable to find any values for key: %s", key)
+ }
+ if response.Count > 1 {
+ panic(errors.New("should never get more than one value for get"))
+ }
+
+ return *getKeyValueVersion(response.Kvs[0]), nil
+}
+
+func (client *etcKeystore) KeepAliveKey(ctxt context.Context, key string) error {
+
+	getResponse, err := client.Client.Get(context.Background(), key)
+	handleError(err)
+	if getResponse.Count == 1 {
+		// if another host seems to hold the key, back off for 10 seconds in case we just did a quick restart
+		time.Sleep(time.Second * 10)
+	}
+
+	// TODO: should the timeout and TTL be configurable?
+ var ttl int64 = 10
+ grantResponse, err := client.Client.Grant(ctxt, ttl)
+ if err != nil {
+ log.Fatal(err)
+ }
+ leaseID := grantResponse.ID
+
+ txnResponse, err := client.Client.Txn(ctxt).
+ If(clientv3util.KeyMissing(key)).
+ Then(clientv3.OpPut(key, "keep-alive", clientv3.WithLease(leaseID), clientv3.WithPrevKV())).
+ Commit()
+ handleError(err)
+ if !txnResponse.Succeeded {
+ return fmt.Errorf("unable to create keep-alive key %s due to: %+v", key, txnResponse.Responses)
+ }
+
+ ch, err := client.Client.KeepAlive(ctxt, leaseID)
+ if err != nil {
+ log.Fatal(err)
+ }
+
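+	// start the counter at 9 so the first refresh logs immediately,
+	// then log every 10th refresh to show liveness without spamming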
+ counter := 9
+ go func() {
+ for range ch {
+ if counter >= 9 {
+ counter = 0
+ log.Println("Still refreshing key:", key)
+ } else {
+ counter++
+ }
+ }
+ // TODO: should allow context to be cancelled
+ log.Panicf("Unable to refresh key: %s", key)
+ }()
+ return nil
+}
+
+func (client *etcKeystore) DeleteAllKeysWithPrefix(prefix string) (int64, error) {
+ response, err := client.Client.Delete(context.Background(), prefix, clientv3.WithPrefix())
+ handleError(err)
+ return response.Deleted, nil
+}
+
+func (client *etcKeystore) Watch(ctxt context.Context, key string, withPrefix bool) store.KeyValueUpdateChan {
+ options := []clientv3.OpOption{clientv3.WithPrevKV()}
+ if withPrefix {
+ options = append(options, clientv3.WithPrefix())
+ }
+ rch := client.Watcher.Watch(ctxt, key, options...)
+
+ c := make(chan store.KeyValueUpdate)
+
+ go processWatchEvents(rch, c)
+
+ return c
+}
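+
+// Illustrative sketch: a consumer ranges over updates until the context
+// is cancelled, at which point processWatchEvents closes the channel:
+//
+//   ctx, cancel := context.WithCancel(context.Background())
+//   defer cancel()
+//   for update := range keystore.Watch(ctx, "/session/", true) {
+//       if update.Err != nil {
+//           log.Println(update.Err)
+//       }
+//   }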
+
+func processWatchEvents(watchChan clientv3.WatchChan, c chan store.KeyValueUpdate) {
+ for watchResponse := range watchChan {
+ // if error, send empty update with an error
+ err := watchResponse.Err()
+ if err != nil {
+ c <- store.KeyValueUpdate{Err: err}
+ }
+
+ // send all events in this watch response
+ for _, ev := range watchResponse.Events {
+ update := store.KeyValueUpdate{
+ IsCreate: ev.IsCreate(),
+ IsModify: ev.IsModify(),
+ IsDelete: ev.Type == clientv3.EventTypeDelete,
+ }
+ if update.IsCreate || update.IsModify {
+ update.New = getKeyValueVersion(ev.Kv)
+ }
+ if update.IsDelete || update.IsModify {
+ update.Old = getKeyValueVersion(ev.PrevKv)
+ }
+
+ c <- update
+ }
+ }
+
+ // Assuming we get here when the context is cancelled or hits its timeout
+ // i.e. there are no more events, so we close the channel
+ close(c)
+}
diff --git a/tools/slurm-test.sh b/tools/slurm-test.sh
index 79787e07..7bba5e9a 100755
--- a/tools/slurm-test.sh
+++ b/tools/slurm-test.sh
@@ -81,3 +81,4 @@ sleep $SLEEP_INTERVAL
scontrol show burstbuffer
squeue
scontrol show bbstat
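+# show per-job requested vs allocated TRES, to check burst buffer accounting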
+sacct -o jobid'%10',reqtres'%45',alloctres'%45' -X