-
Notifications
You must be signed in to change notification settings - Fork 21
/
Copy pathdata_vault_multihead.py
388 lines (337 loc) · 12.8 KB
/
data_vault_multihead.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
#!/usr/bin/python
# Copyright (C) 2012 Matthew Neeley
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
### BEGIN NODE INFO
[info]
name = Data Vault Multihead
instancename = Data Vault
version = 3.0.1
description = Store and retrieve numeric data
[startup]
cmdline = %PYTHON% %FILE% --auto
timeout = 20
[shutdown]
message = 987654321
timeout = 5
### END NODE INFO
"""
from __future__ import with_statement
import sys
import os
import re
import traceback
import warnings
from twisted.application.internet import TCPClient
from twisted.application.service import MultiService, Service
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from labrad import constants, protocol, util
import labrad.wrappers
from datavault import SessionStore
from datavault.server import DataVaultMultiHead
def lock_path(d):
    '''
    Lock a directory and return a file descriptor corresponding to the lockfile

    This lock is non-blocking and throws an exception if it can't get the lock.
    The user is expected to fix this.

    Args:
        d (str): directory to lock; a file named 'lockfile' is created inside it.

    Returns:
        The open lockfile descriptor (held for the life of the process), or
        None on non-POSIX platforms where flock is unavailable.

    Raises:
        RuntimeError: if another process already holds the lock.
    '''
    if os.name != "posix":
        warnings.warn('File locks only available on POSIX. Be very careful not to run two copies of the data vault')
        return
    import fcntl
    filename = os.path.join(d, 'lockfile')
    fd = os.open(filename, os.O_CREAT|os.O_RDWR)
    try:
        fcntl.flock(fd, fcntl.LOCK_EX|fcntl.LOCK_NB)
    except IOError:
        # We failed to get the lock: close the descriptor so it doesn't leak
        # before reporting the conflict.
        os.close(fd)
        raise RuntimeError('Unable to acquire filesystem lock. Data vault already running!')
    if os.fstat(fd).st_size < 1:
        # First time the lockfile is created: leave a warning for curious users.
        # (bytes literal: identical under Python 2, required by os.write on Python 3)
        os.write(fd, b"If you delete this file without understanding it will cause you great pain\n")
    return fd
def unlock(fd):
    '''
    Release the filesystem lock held on descriptor *fd*.

    We don't actually use this, since we hold the lock until the datavault exits
    and let the OS clean up.
    '''
    if os.name == "posix":
        import fcntl
        fcntl.flock(fd, fcntl.LOCK_UN)
    else:
        warnings.warn('File locks only available on POSIX. Be very careful not to run two copies of the data vault')
# One instance per manager, persistent (not recreated when connections are dropped)
class DataVaultConnector(Service):
    """Service that connects the Data Vault to a single LabRAD manager
    If the manager is stopped or we lose the network connection,
    this service attempts to reconnect so that we will come
    back online when the manager is back up.
    """
    # Seconds to wait between reconnection attempts after a failure/disconnect.
    reconnectDelay = 10
    def __init__(self, host, port, password, hub, session_store):
        # host/port/password identify the LabRAD manager this connector serves.
        self.host = host
        self.port = port
        self.password = password
        # hub: the DataVaultServiceHost that tracks all live server instances.
        self.hub = hub
        # session_store is shared across all connectors so every manager sees
        # the same datasets.
        self.session_store = session_store
        self.connected = False
    def report(self, message):
        # Prefix log lines with host:port so output from multiple managers is
        # distinguishable.
        print '{}:{} - {}'.format(self.host, self.port, message)
    @inlineCallbacks
    def startService(self):
        """Connect to labrad in a loop, reconnecting after connection loss."""
        self.running = True
        while self.running:
            self.report('Connecting...')
            try:
                dv = DataVaultMultiHead(self.host, self.port, self.password,
                                        self.hub, self.session_store)
                # start() returns a callable used later to shut the server down.
                self.stop_func = yield self.start(dv)
                self.report('Connected')
                self.connected = True
            except Exception:
                self.report('Data Vault failed to start')
                traceback.print_exc()
            else:
                # Startup succeeded: block here until the connection is lost
                # or the server shuts down.
                try:
                    yield dv.onShutdown()
                except Exception:
                    self.report('Disconnected with error')
                    traceback.print_exc()
                else:
                    self.report('Disconnected')
                self.hub.disconnect(dv)
                self.connected = False
            # Unless stopService() cleared self.running, wait and retry.
            if self.running:
                self.report('Will reconnect in {} seconds...'.format(
                    self.reconnectDelay))
                yield util.wakeupCall(self.reconnectDelay)
    @inlineCallbacks
    def stopService(self):
        # Stop the reconnect loop; stop_func only exists if start() succeeded
        # at least once.
        self.running = False
        if hasattr(self, 'stop_func'):
            yield self.stop_func()
    @inlineCallbacks
    def start(self, dv):
        """Start the given DataVaultMultihead server.
        The server startup and shutdown logic changed in pylabrad 0.95, so we
        need separate logic to handle the old and new cases.
        Args:
            dv (DataVaultMultihead): The labrad server object that we want to
                start.
        Returns:
            A deferred that fires after the server has successfully started.
            This deferred contains a function that can be invoked to shutdown
            the server. That function itself returns a deferred that will fire
            when the shutdown is complete.
        """
        if hasattr(dv, 'startup'):
            # pylabrad 0.95+
            p = yield protocol.connect(self.host, self.port)
            yield p.authenticate(password=self.password)
            yield dv.startup(p)
            @inlineCallbacks
            def stop_func():
                dv.disconnect()
                yield dv.onShutdown()
        else:
            # pylabrad 0.94 and earlier
            try:
                dv.configure_tls(self.host, "starttls")
            except AttributeError:
                # Older pylabrad: configure_tls doesn't exist; proceed without TLS.
                self.report("pylabrad doesn't support TLS")
            cxn = TCPClient(self.host, self.port, dv)
            cxn.startService()
            yield dv.onStartup()
            @inlineCallbacks
            def stop_func():
                yield cxn.stopService()
        returnValue(stop_func)
# Hub object: one instance total
class DataVaultServiceHost(MultiService):
"""Parent Service that manages multiple child DataVaultConnector's"""
signals = [
'onNewDir',
'onNewDataset',
'onTagsUpdated',
'onDataAvailable',
'onNewParameter',
'onCommentsAvailable'
]
def __init__(self, path, managers):
MultiService.__init__(self)
self.path = path
self.managers = managers
self.servers = set()
self.session_store = SessionStore(path, self)
for signal in self.signals:
self.wrapSignal(signal)
for host, port, password in managers:
self.add_server(host, port, password)
def connect(self, server):
self.servers.add(server)
def disconnect(self, server):
if server in self.servers:
self.servers.remove(server)
def reconnect(self, host_regex, port=0):
'''
Drop the connection to the specified host(s). They will auto-reconnect.
'''
for s in self.servers:
if re.match(host_regex, s.host) and (port == 0 or s.port==port):
s._cxn.disconnect()
def ping(self):
'''
Ping all attached managers as a keepalive/dropped connection detection mechanism
'''
for s in self.servers:
s.keepalive()
#s.client.manager.packet()
#p.echo('123')
#result = yield p.send()
# x = result.echo
# return result
def kick(self, host_regexp, port=0):
'''
Disconnect from a manager and don't reconnect.
'''
for connector in self:
if re.match(host_regexp, connector.host) and (port == 0 or port == connector.port):
try:
connector.stopService()
except Exception:
pass
@inlineCallbacks
def refresh_managers(self):
'''
Refresh list of managers from the registry. New servers will be added. Existing servers
will *not* be removed, even if they are no longer in the registry. Use "kick" to disconnect
them.
'''
# We don't know which (if any) managers are live. For now, just make a new client connection
# to the "primary" manager.
cxn = yield labrad.wrappers.connectAsync()
path = ['', 'Servers', 'Data Vault', 'Multihead']
reg = cxn.registry
p = reg.packet()
p.cd(path)
p.get("Managers", "*(sws)", key="managers")
ans = yield p.send()
for (host, port, password) in ans.managers:
if not port:
port = constants.MANAGER_PORT
if not password:
password = constants.PASSWORD
for connector in self:
if connector.host == host and connector.port == port:
break
else:
self.add_server(host, port, password)
cxn.disconnect()
return
def add_server(self, host, port, password):
dvc = DataVaultConnector(host, port, password, self, self.session_store)
dvc.setServiceParent(self)
def __str__(self):
managers = ['%s:%d' % (connector.host, connector.port) for connector in self]
return 'DataVaultServiceHost(%s)' % (managers,)
def wrapSignal(self, signal):
print 'wrapping signal:', signal
def relay(data, contexts=None, tag=None):
for c in contexts:
try:
sig = getattr(c.server, signal)
sig(data, [c.context], tag)
except Exception:
print '{}:{} - error relaying signal {}'.format(
c.server.host, c.server.port, signal)
traceback.print_exc()
setattr(self, signal, relay)
@inlineCallbacks
def load_settings_registry(cxn):
    '''
    Load data vault settings from the registry of the given client connection.

    Make a client connection to the labrad host specified in the
    environment (i.e., by the node server) and load the rest of the settings
    from there.
    This file also takes care of locking the datavault storage directory.
    The lock only works on the local host, so we also node lock the datavault:
    if the registry has a 'Node' key, the datavault will refuse to start
    on any other host. This should prevent ever having two copies of the
    datavault running.
    '''
    # Fetch repository path, manager list, and optional node lock in one packet.
    p = cxn.registry.packet()
    p.cd(['', 'Servers', 'Data Vault', 'Multihead'])
    p.get("Repository", 's', key="repo")
    p.get("Managers", "*(sws)", key="managers")
    p.get("Node", "s", False, "", key="node")
    ans = yield p.send()
    # Enforce the node lock if one is configured.
    node_name = util.getNodeName()
    if ans.node and (ans.node != node_name):
        raise RuntimeError('Node name "%s" from registry does not match current host "%s"' % (ans.node, node_name))
    cxn.disconnect()
    returnValue((ans.repo, ans.managers))
def load_settings_cmdline(argv):
    """Parse data vault settings from a command-line argument list.

    argv[1] is the repository path; each remaining argument names a manager
    in the form [password@]host[:port]. A missing password is recorded as ''
    and a missing port as 0 (defaults are filled in later).

    Returns:
        (path, managers) where managers is a list of (host, port, password).

    Raises:
        RuntimeError: if fewer than two arguments are supplied.
    """
    if len(argv) < 3:
        raise RuntimeError('Incorrect command line')
    # We lock the datavault path, but we can't check the node lock unless using
    # --auto to get the data from the registry.
    def _parse(spec):
        password, _, hostport = spec.rpartition('@')
        host, colon, port = hostport.partition(':')
        return (host, int(port) if colon else 0, password)
    return argv[1], [_parse(m) for m in argv[2:]]
def start_server(args):
    """Validate and lock the data path, then launch the service host.

    Args:
        args: (path, managers) tuple as returned by the settings loaders,
            where each manager is (host, port, password) with 0/'' meaning
            "use the default".
    """
    path, managers = args
    if not os.path.exists(path):
        raise Exception('data path %s does not exist' % path)
    if not os.path.isdir(path):
        raise Exception('data path %s is not a directory' % path)
    def with_defaults(manager):
        # Fill in the default port and password for entries that omit them.
        host, port, password = manager
        return (host, port or constants.MANAGER_PORT, password or constants.PASSWORD)
    lock_path(path)
    service = DataVaultServiceHost(path, [with_defaults(m) for m in managers])
    service.startService()
def main(argv=sys.argv):
@inlineCallbacks
def start():
try:
if len(argv) > 1 and argv[1] == '--auto':
cxn = yield labrad.wrappers.connectAsync()
settings = yield load_settings_registry(cxn)
else:
settings = load_settings_cmdline(argv)
start_server(settings)
except Exception as e:
print e
print 'usage: %s /path/to/vault/directory [password@]host[:port] [password2]@host2[:port2] ...' % (argv[0])
reactor.callWhenRunning(reactor.stop)
_ = start()
reactor.run()
if __name__ == '__main__':
    # Run the data vault when invoked directly (e.g. by the LabRAD node).
    main()