From ef2eb4521207ad575c4a9c9541a53eb3c383e145 Mon Sep 17 00:00:00 2001 From: yuuki takezawa Date: Sun, 4 Feb 2024 23:01:39 +0900 Subject: [PATCH] Fix panic due to early onEvent Logger call --- cluster/clusterproviders/zk/zk_provider.go | 2 - .../clusterproviders/zk/zk_provider_test.go | 150 +++++++++--------- 2 files changed, 74 insertions(+), 78 deletions(-) diff --git a/cluster/clusterproviders/zk/zk_provider.go b/cluster/clusterproviders/zk/zk_provider.go index ae0c707a..6072c035 100644 --- a/cluster/clusterproviders/zk/zk_provider.go +++ b/cluster/clusterproviders/zk/zk_provider.go @@ -339,14 +339,12 @@ func (p *Provider) updateLeadership(ns []*Node) { } func (p *Provider) onEvent(evt zk.Event) { - p.cluster.Logger().Debug("Zookeeper event.", slog.String("type", evt.Type.String()), slog.String("state", evt.State.String()), slog.String("path", evt.Path)) if evt.Type != zk.EventSession { return } switch evt.State { case zk.StateConnecting, zk.StateDisconnected, zk.StateExpired: if p.role == Leader { - p.cluster.Logger().Info("Role changed.", slog.String("from", Leader.String()), slog.String("to", Follower.String())) p.role = Follower p.roleChangedChan <- Follower } diff --git a/cluster/clusterproviders/zk/zk_provider_test.go b/cluster/clusterproviders/zk/zk_provider_test.go index 5eab8bdb..00f8a4c4 100644 --- a/cluster/clusterproviders/zk/zk_provider_test.go +++ b/cluster/clusterproviders/zk/zk_provider_test.go @@ -1,78 +1,76 @@ package zk -//TODO: fix this -// -//import ( -// "sync/atomic" -// "testing" -// "time" -// -// "github.com/asynkron/protoactor-go/actor" -// "github.com/asynkron/protoactor-go/cluster" -// "github.com/asynkron/protoactor-go/cluster/identitylookup/disthash" -// "github.com/asynkron/protoactor-go/remote" -// "github.com/stretchr/testify/suite" -//) -// -//type ZookeeperTestSuite struct { -// suite.Suite -//} -// -//func (suite *ZookeeperTestSuite) SetupTest() { -// -//} -// -//func (suite *ZookeeperTestSuite) TearDownTest() { -//} -// -//func TestZookeeperTestSuite(t *testing.T) { -// suite.Run(t, new(ZookeeperTestSuite)) -//} -// -//type ClusterAndSystem struct { -// Cluster *cluster.Cluster -// System *actor.ActorSystem -//} -// -//func (self *ClusterAndSystem) Shutdown() { -// self.Cluster.Shutdown(true) -//} -// -//func (suite *ZookeeperTestSuite) start(name string, opts ...cluster.ConfigOption) *ClusterAndSystem { -// cp, _ := New([]string{`localhost:8000`}) -// remoteConfig := remote.Configure("localhost", 0) -// config := cluster.Configure(name, cp, disthash.New(), remoteConfig, opts...) 
-//	system := actor.NewActorSystem()
-//	c := cluster.New(system, config)
-//	c.StartMember()
-//	return &ClusterAndSystem{Cluster: c, System: system}
-//}
-//
-//func (suite *ZookeeperTestSuite) TestEmptyExecute() {
-//	name := `cluster0`
-//	suite.start(name).Shutdown()
-//}
-//
-//func (suite *ZookeeperTestSuite) TestMultiNodes() {
-//	var actorCount int32
-//	props := actor.PropsFromFunc(func(ctx actor.Context) {
-//		switch ctx.Message().(type) {
-//		case *actor.Started:
-//			atomic.AddInt32(&actorCount, 1)
-//		}
-//	})
-//	helloKind := cluster.NewKind("hello", props)
-//
-//	name := `cluster1`
-//	c1 := suite.start(name, cluster.WithKinds(helloKind))
-//	defer c1.Shutdown()
-//	c2 := suite.start(name, cluster.WithKinds(helloKind))
-//	defer c2.Shutdown()
-//	c1.Cluster.Get(`a1`, `hello`)
-//	c2.Cluster.Get(`a2`, `hello`)
-//	for actorCount != 2 {
-//		time.Sleep(time.Microsecond * 5)
-//	}
-//	suite.Assert().Equal(2, c1.Cluster.MemberList.Members().Len(), "Expected 2 members in the cluster")
-//	suite.Assert().Equal(2, c2.Cluster.MemberList.Members().Len(), "Expected 2 members in the cluster")
-//}
+import (
+	"sync/atomic"
+	"testing"
+	"time"
+
+	"github.com/asynkron/protoactor-go/actor"
+	"github.com/asynkron/protoactor-go/cluster"
+	"github.com/asynkron/protoactor-go/cluster/identitylookup/disthash"
+	"github.com/asynkron/protoactor-go/remote"
+	"github.com/stretchr/testify/suite"
+)
+
+type ZookeeperTestSuite struct {
+	suite.Suite
+}
+
+func (suite *ZookeeperTestSuite) SetupTest() {
+
+}
+
+func (suite *ZookeeperTestSuite) TearDownTest() {
+}
+
+func TestZookeeperTestSuite(t *testing.T) {
+	suite.Run(t, new(ZookeeperTestSuite))
+}
+
+type ClusterAndSystem struct {
+	Cluster *cluster.Cluster
+	System  *actor.ActorSystem
+}
+
+func (self *ClusterAndSystem) Shutdown() {
+	self.Cluster.Shutdown(true)
+}
+
+func (suite *ZookeeperTestSuite) start(name string, opts ...cluster.ConfigOption) *ClusterAndSystem {
+	cp, _ := New([]string{`localhost:8000`})
+	remoteConfig := remote.Configure("localhost", 0)
+	config := cluster.Configure(name, cp, disthash.New(), remoteConfig, opts...)
+	system := actor.NewActorSystem()
+	c := cluster.New(system, config)
+	c.StartMember()
+	return &ClusterAndSystem{Cluster: c, System: system}
+}
+
+func (suite *ZookeeperTestSuite) TestEmptyExecute() {
+	name := `cluster0`
+	suite.start(name).Shutdown()
+}
+
+func (suite *ZookeeperTestSuite) TestMultiNodes() {
+	var actorCount int32
+	props := actor.PropsFromFunc(func(ctx actor.Context) {
+		switch ctx.Message().(type) {
+		case *actor.Started:
+			atomic.AddInt32(&actorCount, 1)
+		}
+	})
+	helloKind := cluster.NewKind("hello", props)
+
+	name := `cluster1`
+	c1 := suite.start(name, cluster.WithKinds(helloKind))
+	defer c1.Shutdown()
+	c2 := suite.start(name, cluster.WithKinds(helloKind))
+	defer c2.Shutdown()
+	c1.Cluster.Get(`a1`, `hello`)
+	c2.Cluster.Get(`a2`, `hello`)
+	for atomic.LoadInt32(&actorCount) != 2 {
+		time.Sleep(time.Microsecond * 5)
+	}
+	suite.Assert().Equal(2, c1.Cluster.MemberList.Members().Len(), "Expected 2 members in the cluster")
+	suite.Assert().Equal(2, c2.Cluster.MemberList.Members().Len(), "Expected 2 members in the cluster")
+}
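
Side note, outside the patch itself: the subject points at onEvent calling p.cluster.Logger() too early. A plausible reading, stated here as an assumption rather than a fact from the patch, is that the Zookeeper session watcher can fire before the provider holds a usable cluster reference, so the dereference panics. If the Debug log were worth keeping, a guarded variant could look roughly like the sketch below; it reuses only identifiers visible in the diff and is not what this patch does:

	func (p *Provider) onEvent(evt zk.Event) {
		// Only log once the provider is attached to a cluster; the watcher
		// may deliver events before that happens (assumption, see note above).
		if p.cluster != nil {
			p.cluster.Logger().Debug("Zookeeper event.",
				slog.String("type", evt.Type.String()),
				slog.String("state", evt.State.String()),
				slog.String("path", evt.Path))
		}
		if evt.Type != zk.EventSession {
			return
		}
		// Remaining session-state handling as in the patched function.
	}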
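
On the re-enabled test, also just a sketch: TestMultiNodes spins until actorCount reaches 2 and would hang the suite indefinitely if one member never starts its hello actor. A bounded wait could replace the bare loop; the 5-second deadline and 5-millisecond poll interval below are arbitrary choices, not values from this patch:

	// Poll the counter with a deadline instead of spinning forever.
	deadline := time.Now().Add(5 * time.Second)
	for atomic.LoadInt32(&actorCount) != 2 {
		if time.Now().After(deadline) {
			suite.T().Fatalf("timed out waiting for both hello actors to start")
		}
		time.Sleep(5 * time.Millisecond)
	}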