package htlcswitch

import (
	"crypto/rand"
	"fmt"
	"testing"
	"time"

	sphinx "github.com/lightningnetwork/lightning-onion"
	"github.com/lightningnetwork/lnd/chainntnfs"
	"github.com/lightningnetwork/lnd/kvdb"
	"github.com/lightningnetwork/lnd/lntest/mock"
	"github.com/stretchr/testify/require"
)

const (
	cltv uint32 = 100000
)

// startup sets up the DecayedLog and possibly the garbage collector.
func startup(dbPath string, notifier bool) (sphinx.ReplayLog,
	*mock.ChainNotifier, *sphinx.HashPrefix, func(), error) {

	cfg := &kvdb.BoltConfig{
		DBTimeout: time.Second,
	}
	backend, err := NewBoltBackendCreator(dbPath, "sphinxreplay.db")(cfg)
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("unable to create temporary "+
			"decayed log db: %v", err)
	}

	var log sphinx.ReplayLog
	var chainNotifier *mock.ChainNotifier
	if notifier {
		// Create the MockNotifier which triggers the garbage collector
		chainNotifier = &mock.ChainNotifier{
			SpendChan: make(chan *chainntnfs.SpendDetail),
			EpochChan: make(chan *chainntnfs.BlockEpoch, 1),
			ConfChan:  make(chan *chainntnfs.TxConfirmation),
		}

		// Initialize the DecayedLog object
		log = NewDecayedLog(backend, chainNotifier)
	} else {
		// Initialize the DecayedLog object
		log = NewDecayedLog(backend, nil)
	}

	// Open the channeldb (start the garbage collector)
	err = log.Start()
	if err != nil {
		return nil, nil, nil, nil, err
	}

	// Create a HashPrefix identifier for a packet. Instead of actually
	// generating an ECDH secret and hashing it, simulate with random bytes.
	// This is used as a key to retrieve the cltv value.
	var hashedSecret sphinx.HashPrefix
	_, err = rand.Read(hashedSecret[:])
	if err != nil {
		return nil, nil, nil, nil, err
	}

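	// stop shuts down the DecayedLog and closes the underlying database
	// backend.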
	stop := func() {
		_ = log.Stop()
		backend.Close()
	}

	return log, chainNotifier, &hashedSecret, stop, nil
}

// TestDecayedLogGarbageCollector tests the ability of the garbage collector
// to delete expired cltv values every time a block is received. Expired cltv
// values are cltv values that are < current block height.
func TestDecayedLogGarbageCollector(t *testing.T) {
	t.Parallel()

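	// Create a temporary directory for the test database; it is removed
	// automatically when the test completes.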
	dbPath := t.TempDir()

	d, notifier, hashedSecret, _, err := startup(dbPath, true)
	require.NoError(t, err, "Unable to start up DecayedLog")
	t.Cleanup(func() {
		require.NoError(t, d.Stop())
	})

	// Store <hashedSecret, cltv> in the sharedHashBucket.
	err = d.Put(hashedSecret, cltv)
	require.NoError(t, err, "Unable to store in channeldb")

	// Wait for database write (GC is in a goroutine)
	time.Sleep(500 * time.Millisecond)

	// Send block notifications to garbage collector. The garbage collector
	// should remove the entry by block 100001.

	// Send block 100000
	notifier.EpochChan <- &chainntnfs.BlockEpoch{
		Height: 100000,
	}

	// Assert that hashedSecret is still in the sharedHashBucket
	val, err := d.Get(hashedSecret)
	require.NoError(t, err, "Get failed - received an error upon Get")
	if val != cltv {
		t.Fatalf("GC incorrectly deleted CLTV")
	}

	// Send block 100001 (expiry block)
	notifier.EpochChan <- &chainntnfs.BlockEpoch{
		Height: 100001,
	}

	// Wait for database write (GC is in a goroutine)
	time.Sleep(500 * time.Millisecond)

	// Assert that hashedSecret is not in the sharedHashBucket
	_, err = d.Get(hashedSecret)
	if err == nil {
		t.Fatalf("CLTV was not deleted")
	}
	if err != sphinx.ErrLogEntryNotFound {
		t.Fatalf("Get failed - received unexpected error upon Get: %v", err)
	}
}

// TestDecayedLogPersistentGarbageCollector tests the persistence property of
// the garbage collector. The garbage collector is restarted immediately after
// a CLTV value is stored, and a block that expires that value is then sent to
// the ChainNotifier. We test that this causes the <hashedSecret, CLTV> pair
// to be deleted even across GC restarts.
func TestDecayedLogPersistentGarbageCollector(t *testing.T) {
	t.Parallel()

	dbPath := t.TempDir()

	d, _, hashedSecret, stop, err := startup(dbPath, true)
	require.NoError(t, err, "Unable to start up DecayedLog")
	t.Cleanup(func() {
		require.NoError(t, d.Stop())
	})

	// Store <hashedSecret, cltv> in the sharedHashBucket
	if err = d.Put(hashedSecret, cltv); err != nil {
		t.Fatalf("Unable to store in channeldb: %v", err)
	}

	// The hash prefix should be retrievable from the decayed log.
	_, err = d.Get(hashedSecret)
	if err != nil {
		t.Fatalf("Get failed - received unexpected error upon Get: %v", err)
	}

	// Shut down DecayedLog and the garbage collector along with it.
	stop()

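	// Restart the DecayedLog and garbage collector on top of the same
	// database.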
	d2, notifier2, _, _, err := startup(dbPath, true)
	require.NoError(t, err, "Unable to restart DecayedLog")
	t.Cleanup(func() {
		require.NoError(t, d2.Stop())
	})

	// Check that the hash prefix still exists in the new db instance.
	_, err = d2.Get(hashedSecret)
	if err != nil {
		t.Fatalf("Get failed - received unexpected error upon Get: %v", err)
	}

	// Send a block notification to the garbage collector that expires
	// the stored CLTV.
	notifier2.EpochChan <- &chainntnfs.BlockEpoch{
		Height: int32(100001),
	}

	// Wait for database write (GC is in a goroutine)
	time.Sleep(500 * time.Millisecond)

	// Assert that hashedSecret is not in the sharedHashBucket
	_, err = d2.Get(hashedSecret)
	if err != sphinx.ErrLogEntryNotFound {
		t.Fatalf("Get failed - received unexpected error upon Get: %v", err)
	}
}

// TestDecayedLogInsertionAndDeletion inserts a cltv value into the
// sharedHashBucket, then deletes it, and finally asserts that we can no
// longer retrieve it.
func TestDecayedLogInsertionAndDeletion(t *testing.T) {
	t.Parallel()

	dbPath := t.TempDir()

	d, _, hashedSecret, _, err := startup(dbPath, false)
	require.NoError(t, err, "Unable to start up DecayedLog")
	t.Cleanup(func() {
		require.NoError(t, d.Stop())
	})

	// Store <hashedSecret, cltv> in the sharedHashBucket.
	err = d.Put(hashedSecret, cltv)
	require.NoError(t, err, "Unable to store in channeldb")

	// Delete hashedSecret from the sharedHashBucket.
	err = d.Delete(hashedSecret)
	require.NoError(t, err, "Unable to delete from channeldb")

	// Assert that hashedSecret is not in the sharedHashBucket
	_, err = d.Get(hashedSecret)
	if err == nil {
		t.Fatalf("CLTV was not deleted")
	}
	if err != sphinx.ErrLogEntryNotFound {
		t.Fatalf("Get failed - received unexpected error upon Get: %v", err)
	}
}

// TestDecayedLogStartAndStop tests for persistence. The DecayedLog is started,
// a cltv value is stored in the sharedHashBucket, and then the DecayedLog is
// stopped. The DecayedLog is started up again and we test that the cltv value
// is indeed still stored in the sharedHashBucket. We then delete the cltv
// value and check that the deletion persists across another restart.
func TestDecayedLogStartAndStop(t *testing.T) {
	t.Parallel()

	dbPath := t.TempDir()

	d, _, hashedSecret, stop, err := startup(dbPath, false)
	require.NoError(t, err, "Unable to start up DecayedLog")
	t.Cleanup(func() {
		require.NoError(t, d.Stop())
	})

	// Store <hashedSecret, cltv> in the sharedHashBucket.
	err = d.Put(hashedSecret, cltv)
	require.NoError(t, err, "Unable to store in channeldb")

	// Shut down the DecayedLog's channeldb.
	stop()

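	// Restart the DecayedLog using the same database directory.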
	d2, _, _, stop, err := startup(dbPath, false)
	require.NoError(t, err, "Unable to restart DecayedLog")
	t.Cleanup(func() {
		require.NoError(t, d2.Stop())
	})

	// Retrieve the stored cltv value given the hashedSecret key.
	value, err := d2.Get(hashedSecret)
	require.NoError(t, err, "Unable to retrieve from channeldb")

	// Check that the original cltv value matches the retrieved cltv
	// value.
	if cltv != value {
		t.Fatalf("Value retrieved doesn't match value stored")
	}

	// Delete hashedSecret from the sharedHashBucket.
	err = d2.Delete(hashedSecret)
	require.NoError(t, err, "Unable to delete from channeldb")

	// Shut down the DecayedLog's channeldb.
	stop()

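	// Start the DecayedLog a third time to verify that the deletion
	// persisted.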
	d3, _, _, _, err := startup(dbPath, false)
	require.NoError(t, err, "Unable to restart DecayedLog")
	t.Cleanup(func() {
		require.NoError(t, d3.Stop())
	})

	// Assert that hashedSecret is no longer in the sharedHashBucket.
	_, err = d3.Get(hashedSecret)
	if err == nil {
		t.Fatalf("CLTV was not deleted")
	}
	if err != sphinx.ErrLogEntryNotFound {
		t.Fatalf("Get failed - received unexpected error upon Get: %v", err)
	}
}

// TestDecayedLogStorageAndRetrieval stores a cltv value and then retrieves it
// via the nested sharedHashBucket and finally asserts that the original stored
// and retrieved cltv values are equal.
func TestDecayedLogStorageAndRetrieval(t *testing.T) {
	t.Parallel()

	dbPath := t.TempDir()

	d, _, hashedSecret, _, err := startup(dbPath, false)
	require.NoError(t, err, "Unable to start up DecayedLog")
	t.Cleanup(func() {
		require.NoError(t, d.Stop())
	})

	// Store <hashedSecret, cltv> in the sharedHashBucket
	err = d.Put(hashedSecret, cltv)
	require.NoError(t, err, "Unable to store in channeldb")

	// Retrieve the stored cltv value given the hashedSecret key.
	value, err := d.Get(hashedSecret)
	require.NoError(t, err, "Unable to retrieve from channeldb")

	// If the original cltv value does not match the value retrieved,
	// then the test failed.
	if cltv != value {
		t.Fatalf("Value retrieved doesn't match value stored")
	}
}