
Commit

yyforyongyu committed Dec 7, 2024
1 parent 16818e5 commit 22c164f
Showing 8 changed files with 85 additions and 433 deletions.
405 changes: 5 additions & 400 deletions .github/workflows/main.yml

Large diffs are not rendered by default.

18 changes: 1 addition & 17 deletions Makefile
@@ -191,23 +191,7 @@ check: unit itest

db-instance:
ifeq ($(dbbackend),postgres)
# Remove a previous postgres instance if it exists.
docker rm lnd-postgres --force || echo "Starting new postgres container"

# Start a fresh postgres instance. Allow a maximum of 500 connections so
# that multiple lnd instances with a maximum number of connections of 20
# each can run concurrently. Note that many of the settings here are
# specifically for integration testing and are not fit for running
# production nodes. The increase in max connections ensures that there
# are enough entries allocated for the RWConflictPool to allow multiple
# conflicting transactions to track serialization conflicts. The
# increase in predicate locks and locks per transaction is to allow the
# queries to lock individual rows instead of entire tables, helping
# reduce serialization conflicts. Disabling sequential scan for small
# tables also helps prevent serialization conflicts by ensuring lookups
# lock only relevant rows in the index rather than the entire table.
docker run --name lnd-postgres -e POSTGRES_PASSWORD=postgres -p 6432:5432 -d postgres:13-alpine -N 1500 -c max_pred_locks_per_transaction=1024 -c max_locks_per_transaction=128 -c enable_seqscan=off
docker logs -f lnd-postgres >itest/postgres.log 2>&1 &
scripts/build_postgres.sh $(ITEST_PARALLELISM)

# Wait for the instance to be started.
sleep $(POSTGRES_START_DELAY)
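The db-instance target now hands container startup to scripts/build_postgres.sh (added at the end of this diff), passing $(ITEST_PARALLELISM) as its only argument so that one postgres container is started per parallel test tranche: with ITEST_PARALLELISM=4, for example, the script brings up lnd-postgres-0 through lnd-postgres-3 on ports 6432 through 6435.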
6 changes: 6 additions & 0 deletions itest/lnd_test.go
@@ -127,6 +127,12 @@ func TestLightningNetworkDaemon(t *testing.T) {
ht := harnessTest.Subtest(t1)
ht.SetTestName(testCase.Name)

postgresPort := 6432
if testCasesRunTranche != nil {
postgresPort += int(*testCasesRunTranche)
}
ht.SetPostgresPort(postgresPort)

ht.RunTestCase(testCase)
})
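The tranche index selects the postgres instance for this run: each subtest uses port 6432 plus its tranche number, so tranche 2, for example, connects on 6434 to the lnd-postgres-2 container started by scripts/build_postgres.sh, while an unsplit run (no tranche set) stays on the default 6432.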

4 changes: 4 additions & 0 deletions lntest/harness.go
@@ -469,6 +469,10 @@ func (h *HarnessTest) SetTestName(name string) {
h.manager.currentTestCase = cleanTestCaseName
}

// SetPostgresPort sets the port of the postgres instance used by the nodes
// created by this harness.
func (h *HarnessTest) SetPostgresPort(port int) {
h.manager.postgresPort = port
}

// NewNode creates a new node and asserts its creation. The node is guaranteed
// to have finished its initialization and all its subservers are started.
func (h *HarnessTest) NewNode(name string,
4 changes: 4 additions & 0 deletions lntest/harness_node_manager.go
@@ -46,6 +46,9 @@ type nodeManager struct {

// feeServiceURL is the url of the fee service.
feeServiceURL string

// postgresPort is the port of the postgres instance.
postgresPort int
}

// newNodeManager creates a new node manager instance.
@@ -86,6 +89,7 @@ func (nm *nodeManager) newNode(t *testing.T, name string, extraArgs []string,
LndBinary: nm.lndBinary,
NetParams: miner.HarnessNetParams,
SkipUnlock: noAuth,
PostgresPort: nm.postgresPort,
}

node, err := node.NewHarnessNode(t, cfg)
4 changes: 4 additions & 0 deletions lntest/node/config.go
@@ -159,6 +159,10 @@ type BaseNodeConfig struct {
// postgresDBName is the name of the postgres database in which lnd
// data is stored.
postgresDBName string

// PostgresPort is the port on which the postgres database is
// listening.
PostgresPort int
}

func (cfg BaseNodeConfig) P2PAddr() string {
33 changes: 17 additions & 16 deletions lntest/node/harness_node.go
@@ -39,9 +39,6 @@ const (
// release of announcements by AuthenticatedGossiper to the network.
trickleDelay = 50

postgresDsn = "postgres://postgres:postgres@localhost:" +
"6432/%s?sslmode=disable"

// commitInterval specifies the maximum interval the graph database
// will wait between attempting to flush a batch of modifications to
// disk(db.batch-commit-interval).
@@ -123,11 +120,11 @@ func NewHarnessNode(t *testing.T, cfg *BaseNodeConfig) (*HarnessNode, error) {
var dbName string
if cfg.DBBackend == BackendPostgres {
var err error
dbName, err = createTempPgDB()
dbName, err = createTempPgDB(cfg.PostgresPort)
if err != nil {
return nil, err
}
cfg.PostgresDsn = postgresDatabaseDsn(dbName)
cfg.PostgresDsn = postgresDatabaseDsn(dbName, cfg.PostgresPort)
}

cfg.OriginalExtraArgs = cfg.ExtraArgs
@@ -837,8 +834,8 @@ func (hn *HarnessNode) BackupDB() error {
// Backup database.
backupDBName := hn.Cfg.postgresDBName + "_backup"
err := executePgQuery(
"CREATE DATABASE " + backupDBName + " WITH TEMPLATE " +
hn.Cfg.postgresDBName,
"CREATE DATABASE "+backupDBName+" WITH TEMPLATE "+
hn.Cfg.postgresDBName, hn.Cfg.PostgresPort,
)
if err != nil {
return err
@@ -868,14 +865,15 @@ func (hn *HarnessNode) RestoreDB() error {
// Restore database.
backupDBName := hn.Cfg.postgresDBName + "_backup"
err := executePgQuery(
"DROP DATABASE " + hn.Cfg.postgresDBName,
"DROP DATABASE "+hn.Cfg.postgresDBName,
hn.Cfg.PostgresPort,
)
if err != nil {
return err
}
err = executePgQuery(
"ALTER DATABASE " + backupDBName + " RENAME TO " +
hn.Cfg.postgresDBName,
"ALTER DATABASE "+backupDBName+" RENAME TO "+
hn.Cfg.postgresDBName, hn.Cfg.PostgresPort,
)
if err != nil {
return err
@@ -915,12 +913,15 @@ func (hn *HarnessNode) UpdateGlobalPolicy(policy *lnrpc.RoutingPolicy) {
hn.RPC.UpdateChannelPolicy(updateFeeReq)
}

func postgresDatabaseDsn(dbName string) string {
return fmt.Sprintf(postgresDsn, dbName)
func postgresDatabaseDsn(dbName string, port int) string {
postgresDsn := "postgres://postgres:postgres@localhost:" +
"%d/%s?sslmode=disable"

return fmt.Sprintf(postgresDsn, port, dbName)
}

// createTempPgDB creates a temp postgres database.
func createTempPgDB() (string, error) {
func createTempPgDB(port int) (string, error) {
// Create random database name.
randBytes := make([]byte, 8)
_, err := rand.Read(randBytes)
@@ -930,7 +931,7 @@ func createTempPgDB() (string, error) {
dbName := "itest_" + hex.EncodeToString(randBytes)

// Create database.
err = executePgQuery("CREATE DATABASE " + dbName)
err = executePgQuery("CREATE DATABASE "+dbName, port)
if err != nil {
return "", err
}
@@ -939,10 +940,10 @@
}

// executePgQuery executes a SQL statement in a postgres db.
func executePgQuery(query string) error {
func executePgQuery(query string, port int) error {
pool, err := pgxpool.Connect(
context.Background(),
postgresDatabaseDsn("postgres"),
postgresDatabaseDsn("postgres", port),
)
if err != nil {
return fmt.Errorf("unable to connect to database: %w", err)
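For illustration, assuming a node assigned port 6433 and a temporary database named itest_0123456789abcdef (a hypothetical value for the eight random bytes above), postgresDatabaseDsn yields postgres://postgres:postgres@localhost:6433/itest_0123456789abcdef?sslmode=disable, while executePgQuery always connects through the default postgres database on the same port to create, back up, rename, or drop the per-node databases.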
44 changes: 44 additions & 0 deletions scripts/build_postgres.sh
@@ -0,0 +1,44 @@
#!/bin/bash

# Get all the variables.
PROCESSES=$1

# Remove a previous postgres instance if it exists.
containers=$(docker ps --filter name=lnd-postgres -aq)
if [ -n "$containers" ]; then
echo "Removing previous postgres instance..."
docker rm $containers --force --volumes
fi

echo "Starting new postgres container"

for ((i=0; i<PROCESSES; i++)); do
port=$((6432 + i))
container_name="lnd-postgres-$i"

echo "Starting postgres container $container_name on port $port"

# Start a fresh postgres instance. Allow a maximum of 1500 connections so
# that multiple lnd instances, each with a connection limit of 20, can run
# concurrently. Note that many of the settings here are specifically for
# integration testing and are not fit for running production nodes. The
# increase in max connections ensures that there are enough entries
# allocated for the RWConflictPool to allow multiple conflicting
# transactions to track serialization conflicts. The increase in predicate
# locks and locks per transaction allows queries to lock individual rows
# instead of entire tables, helping reduce serialization conflicts.
# Disabling sequential scans for small tables also helps prevent
# serialization conflicts by ensuring lookups lock only the relevant rows
# in the index rather than the entire table.
docker run \
--name $container_name \
-p $port:5432 \
-e POSTGRES_PASSWORD=postgres \
-d postgres:13-alpine \
-N 1500 \
-c max_pred_locks_per_transaction=1024 \
-c max_locks_per_transaction=128 \
-c enable_seqscan=off

# docker logs -f lnd-postgres >itest/postgres.log 2>&1 &
done
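A minimal usage sketch, assuming the script is invoked from the repository root with the tranche count as its only argument, as the Makefile does; the per-container log path is hypothetical, since log capture is left commented out in the script:

# Start one postgres container per tranche (here: 4 tranches).
./scripts/build_postgres.sh 4

# Inspect the resulting containers: lnd-postgres-0 on port 6432 up to
# lnd-postgres-3 on port 6435.
docker ps --filter name=lnd-postgres

# Logs for a single instance can still be captured manually if needed
# (hypothetical path, modelled on the old itest/postgres.log):
docker logs -f lnd-postgres-0 >itest/postgres-0.log 2>&1 &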
