From 4872eedd83b102d9c454293570e6f8068035d543 Mon Sep 17 00:00:00 2001 From: idk Date: Sat, 7 Oct 2023 19:20:14 +0000 Subject: [PATCH] Router: This moves all client sub-netDbs into the ClientConnectionRunners. This turns the FloodfillNetworkDatabaseSegmentor into a means of looking up netDb's and managing the main netDb and a special netDb for managing multihomes. It adds the ability to use sub-netDbs as a defense against netDb context confusion bugs, and also to switch to a single monolithic netDb if necessary. --- .../web/helpers/ConfigKeyringHandler.java | 4 +- .../i2p/router/web/helpers/NetDbHelper.java | 13 +- .../i2p/router/web/helpers/NetDbRenderer.java | 21 +- .../i2p/router/web/helpers/SummaryHelper.java | 2 +- .../net/i2p/router/ClientManagerFacade.java | 27 ++ .../src/net/i2p/router/RouterContext.java | 1 - .../src/net/i2p/router/RouterVersion.java | 2 +- .../router/client/ClientConnectionRunner.java | 76 ++++- .../net/i2p/router/client/ClientManager.java | 47 +++ .../client/ClientManagerFacadeImpl.java | 42 +++ .../client/ClientMessageEventListener.java | 12 +- .../net/i2p/router/client/LookupDestJob.java | 14 +- .../dummy/DummyClientManagerFacade.java | 14 +- .../dummy/DummyNetworkDatabaseFacade.java | 27 +- .../HandleDatabaseLookupMessageJob.java | 14 +- .../FloodfillNetworkDatabaseFacade.java | 9 +- .../FloodfillNetworkDatabaseSegmentor.java | 311 +++++++----------- .../kademlia/FloodfillVerifyStoreJob.java | 2 +- ...andleFloodfillDatabaseStoreMessageJob.java | 10 +- .../KademliaNetworkDatabaseFacade.java | 140 ++++---- .../kademlia/PersistentDataStore.java | 32 +- .../kademlia/SearchUpdateReplyFoundJob.java | 4 +- .../SegmentedNetworkDatabaseFacade.java | 79 +++-- .../i2p/router/startup/BootNetworkDbJob.java | 3 +- .../tunnel/InboundMessageDistributor.java | 8 +- 25 files changed, 506 insertions(+), 408 deletions(-) diff --git a/apps/routerconsole/java/src/net/i2p/router/web/helpers/ConfigKeyringHandler.java 
b/apps/routerconsole/java/src/net/i2p/router/web/helpers/ConfigKeyringHandler.java index 17aa06ece5..6dde53c201 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/helpers/ConfigKeyringHandler.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/helpers/ConfigKeyringHandler.java @@ -90,11 +90,11 @@ protected void processForm() { return; } // from BlindCache - List clientBase32s = _context.netDbSegmentor().lookupClientBySigningPublicKey(spk); + List clientBase32s = _context.netDbSegmentor().lookupClientBySigningPublicKey(spk); // TODO: This updates all of the blind data for all clients, turning the blind cache into a shared context for the owner of an encrypted leaseSet. // This is probably not ideal, with some social-engineering a service operator who owns an encrypted destination could associate 2 tunnels. // How realistic is it? Maybe not very, but I don't like it. Still, this is better than nothing. - for (String clientBase32 : clientBase32s) { + for (Hash clientBase32 : clientBase32s) { BlindData bdold = _context.clientNetDb(clientBase32).getBlindData(spk); if (bdold != null && d == null) d = bdold.getDestination(); diff --git a/apps/routerconsole/java/src/net/i2p/router/web/helpers/NetDbHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/helpers/NetDbHelper.java index 4c00ca07b6..e4080edb52 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/helpers/NetDbHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/helpers/NetDbHelper.java @@ -6,11 +6,14 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.TreeMap; import net.i2p.crypto.EncType; import net.i2p.crypto.SigType; import net.i2p.data.DataHelper; +import net.i2p.data.Hash; +import net.i2p.data.router.RouterInfo; import net.i2p.util.SystemVersion; import net.i2p.router.sybil.Analysis; import net.i2p.router.web.FormHandler; @@ -327,7 +330,7 @@ public String getFloodfillNetDbSummary() { return 
getNetDbSummary(null, false); } - public String getNetDbSummary(String client, boolean clientOnly) { + public String getNetDbSummary(Hash client, boolean clientOnly) { NetDbRenderer renderer = new NetDbRenderer(_context); try { if (client == null && !clientOnly) @@ -359,7 +362,7 @@ else if ((_mode == 13 || _mode == 16) && !_postOK) } else if (_full == 6) { renderer.renderStatusHTML(_out, _limit, _page, _full, null, true); } else if (_clientOnly && client == null) { - for (String _client : _context.netDbSegmentor().getClients()) { + for (Hash _client : _context.clientManager().getPrimaryHashes()) { renderer.renderLeaseSetHTML(_out, _debug, _client, clientOnly); } } else { @@ -373,7 +376,7 @@ else if ((_mode == 13 || _mode == 16) && !_postOK) return ""; } - public String getClientNetDbSummary(String client) { + public String getClientNetDbSummary(Hash client) { return getNetDbSummary(client, true); } @@ -428,6 +431,10 @@ private void renderNavBar() throws IOException { continue; // can't nav to lookup if (i > 2 && i != tab && !isAdvanced()) continue; + if (i == 10 || i == 11) { + if (_context.netDbSegmentor().getRoutersKnownToClients().size() == 0) + continue; + } if (i == tab) { // we are there if (span) diff --git a/apps/routerconsole/java/src/net/i2p/router/web/helpers/NetDbRenderer.java b/apps/routerconsole/java/src/net/i2p/router/web/helpers/NetDbRenderer.java index fc107cdccb..c0ca01171d 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/helpers/NetDbRenderer.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/helpers/NetDbRenderer.java @@ -127,15 +127,19 @@ public void renderRouterInfoHTML(Writer out, int pageSize, int page, String country, String family, String caps, String ip, String sybil, int port, int highPort, SigType type, EncType etype, String mtu, String ipv6, String ssucaps, - String tr, int cost, int icount, String client, boolean allClients) throws IOException { + String tr, int cost, int icount, Hash client, boolean allClients) 
throws IOException { StringBuilder buf = new StringBuilder(4*1024); List sybils = sybil != null ? new ArrayList(128) : null; FloodfillNetworkDatabaseFacade netdb = _context.netDb(); if (allClients) { netdb = _context.netDb(); }else{ - if (client != null) + if (client != null) { + Log _log = _context.logManager().getLog(NetDbRenderer.class); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("client subdb for: " + client); netdb = _context.clientNetDb(client); + } else netdb = _context.netDb(); } @@ -608,7 +612,7 @@ private static boolean hasCap(RouterInfo ri, String caps) { * @param debug @since 0.7.14 sort by distance from us, display * median distance, and other stuff, useful when floodfill */ - public void renderLeaseSetHTML(Writer out, boolean debug, String client, boolean clientsOnly) throws IOException { + public void renderLeaseSetHTML(Writer out, boolean debug, Hash client, boolean clientsOnly) throws IOException { StringBuilder buf = new StringBuilder(4*1024); if (debug) buf.append("

Debug mode - Sorted by hash distance, closest first

\n"); @@ -619,8 +623,12 @@ public void renderLeaseSetHTML(Writer out, boolean debug, String client, boolean if (clientsOnly){ netdb = _context.netDb(); }else{ - if (client != null) + if (client != null) { + Log _log = _context.logManager().getLog(NetDbRenderer.class); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("client subdb for: " + client); netdb = _context.clientNetDb(client); + } else netdb = _context.netDb(); } @@ -635,8 +643,9 @@ public void renderLeaseSetHTML(Writer out, boolean debug, String client, boolean } if (clientsOnly) leases.addAll(_context.netDbSegmentor().getLeasesKnownToClients()); - else + else{ leases.addAll(netdb.getLeases()); + } int medianCount = 0; int rapCount = 0; BigInteger median = null; @@ -951,7 +960,7 @@ private void renderLeaseSet(StringBuilder buf, LeaseSet ls, boolean debug, long * @param mode 0: charts only; 1: full routerinfos; 2: abbreviated routerinfos * mode 3: Same as 0 but sort countries by count */ - public void renderStatusHTML(Writer out, int pageSize, int page, int mode, String client, boolean clientsOnly) throws IOException { + public void renderStatusHTML(Writer out, int pageSize, int page, int mode, Hash client, boolean clientsOnly) throws IOException { if (!_context.netDb().isInitialized()) { out.write("
"); out.write(_t("Not initialized")); diff --git a/apps/routerconsole/java/src/net/i2p/router/web/helpers/SummaryHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/helpers/SummaryHelper.java index 9980731795..bb6144ab56 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/helpers/SummaryHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/helpers/SummaryHelper.java @@ -632,7 +632,7 @@ public String getDestinations() { else buf.append(DataHelper.escapeHTML(ServletUtil.truncate(name, 29))).append("…"); buf.append("\n"); - LeaseSet ls = _context.netDbSegmentor().lookupLeaseSetHashIsClient(h); + LeaseSet ls = _context.clientNetDb(client.calculateHash()).lookupLeaseSetLocally(h); if (ls != null && _context.tunnelManager().getOutboundClientTunnelCount(h) > 0) { if (!ls.isCurrent(0)) { // yellow light diff --git a/router/java/src/net/i2p/router/ClientManagerFacade.java b/router/java/src/net/i2p/router/ClientManagerFacade.java index 82f62bb237..3be5e9aea6 100644 --- a/router/java/src/net/i2p/router/ClientManagerFacade.java +++ b/router/java/src/net/i2p/router/ClientManagerFacade.java @@ -20,6 +20,7 @@ import net.i2p.data.LeaseSet; import net.i2p.data.i2cp.MessageId; import net.i2p.data.i2cp.SessionConfig; +import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade; /** * Manage all interactions with clients @@ -121,4 +122,30 @@ public void registerMetaDest(Destination dest) throws I2PSessionException {} * @since 0.9.41 */ public void unregisterMetaDest(Destination dest) {} + + /** + * get the FloodfillNetworkDatabaseFacade associated with a particular client destination. + * This is inside the runner, so it won't be there if the runner isn't ready. 
+ * + * @param destHash destination hash associated with the client whose subDb we're looking for + * @return non-null FloodfillNetworkDatabaseFacade + * @since 0.9.60 + */ + public abstract FloodfillNetworkDatabaseFacade getClientFloodfillNetworkDatabaseFacade(Hash destHash); + + /** + * get all of the FloodfillNetworkDatabaseFacades for all of the clients. + * + * @return non-null set of FloodfillNetworkDatabaseFacades + * @since 0.9.60 + */ + public abstract Set getClientFloodfillNetworkDatabaseFacades(); + + /** + * get a set of all primary hashes + * + * @return non-null set of Hashes + * @since 0.9.60 + */ + public abstract Set getPrimaryHashes(); } diff --git a/router/java/src/net/i2p/router/RouterContext.java b/router/java/src/net/i2p/router/RouterContext.java index bffbd269ac..7033358bff 100644 --- a/router/java/src/net/i2p/router/RouterContext.java +++ b/router/java/src/net/i2p/router/RouterContext.java @@ -375,7 +375,6 @@ public Hash routerHash() { public SegmentedNetworkDatabaseFacade netDbSegmentor() { return _netDb; } public FloodfillNetworkDatabaseFacade netDb() { return _netDb.mainNetDB(); } public FloodfillNetworkDatabaseFacade multihomeNetDb() { return _netDb.multiHomeNetDB(); } - public FloodfillNetworkDatabaseFacade clientNetDb(String id) { return _netDb.clientNetDB(id); } public FloodfillNetworkDatabaseFacade clientNetDb(Hash id) { return _netDb.clientNetDB(id); } /** * The actual driver of the router, where all jobs are enqueued and processed. 
diff --git a/router/java/src/net/i2p/router/RouterVersion.java b/router/java/src/net/i2p/router/RouterVersion.java index ac0e1462a0..a048657272 100644 --- a/router/java/src/net/i2p/router/RouterVersion.java +++ b/router/java/src/net/i2p/router/RouterVersion.java @@ -20,7 +20,7 @@ public class RouterVersion { public final static String VERSION = CoreVersion.VERSION; /** for example: "beta", "alpha", "rc" */ public final static String STATUS = ""; - public final static long BUILD = 4; + public final static long BUILD = 5; /** for example "-test" */ public final static String EXTRA = ""; public final static String FULL_VERSION = VERSION + "-" + STATUS + BUILD + EXTRA; diff --git a/router/java/src/net/i2p/router/client/ClientConnectionRunner.java b/router/java/src/net/i2p/router/client/ClientConnectionRunner.java index 963b112f4f..640992e431 100644 --- a/router/java/src/net/i2p/router/client/ClientConnectionRunner.java +++ b/router/java/src/net/i2p/router/client/ClientConnectionRunner.java @@ -49,6 +49,8 @@ import net.i2p.router.RouterContext; import net.i2p.router.crypto.TransientSessionKeyManager; import net.i2p.router.crypto.ratchet.RatchetSKM; +import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade; +import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseSegmentor; import net.i2p.router.crypto.ratchet.MuxedSKM; import net.i2p.util.ConcurrentHashSet; import net.i2p.util.I2PThread; @@ -90,6 +92,8 @@ class ClientConnectionRunner { protected I2CPMessageReader _reader; /** Used for all sessions, which must all have the same crypto keys */ private SessionKeyManager _sessionKeyManager; + /** Used for leaseSets sent to and recieved from this client */ + private FloodfillNetworkDatabaseFacade _floodfillNetworkDatabaseFacade; /** * This contains the last 10 MessageIds that have had their (non-ack) status * delivered to the client (so that we can be sure only to update when necessary) @@ -156,6 +160,8 @@ public ClientConnectionRunner(RouterContext 
context, ClientManager manager, Sock _alreadyProcessed = new ArrayList(); _acceptedPending = new ConcurrentHashSet(); _messageId = new AtomicInteger(_context.random().nextInt()); + // Set up the per-destination FloodfillNetworkDatabaseFacade to prevent clients from being able to + // update leaseSet entries in the floodfill netDb } private static final AtomicInteger __id = new AtomicInteger(); @@ -207,23 +213,25 @@ public synchronized void stopRunning() { _acceptedPending.clear(); if (_sessionKeyManager != null) _sessionKeyManager.shutdown(); + if (_floodfillNetworkDatabaseFacade != null) + if (_floodfillNetworkDatabaseFacade.isClientDb()) + _floodfillNetworkDatabaseFacade.shutdown(); if (_encryptedLSHash != null) _manager.unregisterEncryptedDestination(this, _encryptedLSHash); _manager.unregisterConnection(this); // netdb may be null in unit tests - Hash dbid = getDestHash(); - if (_context.netDbSegmentor() != null) { + if (_context.netDb() != null) { // Note that if the client sent us a destroy message, // removeSession() was called just before this, and // _sessions will be empty. 
for (SessionParams sp : _sessions.values()) { LeaseSet ls = sp.currentLeaseSet; - if (ls != null) - _context.clientNetDb(dbid).unpublish(ls); + if (ls != null && getFloodfillNetworkDatabaseFacade() != null) + getFloodfillNetworkDatabaseFacade().unpublish(ls); // unpublish encrypted LS also ls = sp.currentEncryptedLeaseSet; - if (ls != null) - _context.clientNetDb(dbid).unpublish(ls); + if (ls != null && getFloodfillNetworkDatabaseFacade() != null) + getFloodfillNetworkDatabaseFacade().unpublish(ls); if (!sp.isPrimary) _context.tunnelManager().removeAlias(sp.dest); } @@ -459,12 +467,12 @@ void removeSession(SessionId id) { // Tell client manger _manager.unregisterSession(id, sp.dest); LeaseSet ls = sp.currentLeaseSet; - if (ls != null) - _context.clientNetDb(dbid).unpublish(ls); + if (ls != null && getFloodfillNetworkDatabaseFacade() != null) + getFloodfillNetworkDatabaseFacade().unpublish(ls); // unpublish encrypted LS also ls = sp.currentEncryptedLeaseSet; - if (ls != null) - _context.clientNetDb(dbid).unpublish(ls); + if (ls != null && getFloodfillNetworkDatabaseFacade() != null) + getFloodfillNetworkDatabaseFacade().unpublish(ls); isPrimary = sp.isPrimary; if (isPrimary) _context.tunnelManager().removeTunnels(sp.dest); @@ -484,12 +492,12 @@ void removeSession(SessionId id) { _log.info("Destroying remaining client subsession " + sp.sessionId); _manager.unregisterSession(sp.sessionId, sp.dest); LeaseSet ls = sp.currentLeaseSet; - if (ls != null) - _context.clientNetDb(dbid).unpublish(ls); + if (ls != null && getFloodfillNetworkDatabaseFacade() != null) + getFloodfillNetworkDatabaseFacade().unpublish(ls); // unpublish encrypted LS also ls = sp.currentEncryptedLeaseSet; - if (ls != null) - _context.clientNetDb(dbid).unpublish(ls); + if (ls != null && getFloodfillNetworkDatabaseFacade() != null) + getFloodfillNetworkDatabaseFacade().unpublish(ls); _context.tunnelManager().removeAlias(sp.dest); synchronized(this) { if (sp.rerequestTimer != null) @@ -564,6 +572,18 @@ 
void removePayload(MessageId id) { public int sessionEstablished(SessionConfig config) { Destination dest = config.getDestination(); Hash destHash = dest.calculateHash(); + if (destHash != null){ + if (_log.shouldLog(Log.DEBUG)) { + _log.debug("Initializing subDb for client" + destHash); + } + _floodfillNetworkDatabaseFacade = new FloodfillNetworkDatabaseFacade(_context, destHash); + _floodfillNetworkDatabaseFacade.startup(); + } else { + if (_log.shouldLog(Log.ERROR)) { + _log.error("Initializing subDb for unknown client" + dest, new Exception()); + } + _floodfillNetworkDatabaseFacade = null; + } if (_log.shouldLog(Log.DEBUG)) _log.debug("SessionEstablished called for destination " + destHash); if (_sessions.size() > MAX_SESSIONS) @@ -590,7 +610,6 @@ public int sessionEstablished(SessionConfig config) { _dontSendMSM = "none".equals(opts.getProperty(I2PClient.PROP_RELIABILITY, "").toLowerCase(Locale.US)); _dontSendMSMOnReceive = Boolean.parseBoolean(opts.getProperty(I2PClient.PROP_FAST_RECEIVE)); } - // Set up the // per-destination session key manager to prevent rather easy correlation // based on the specified encryption types in the config @@ -1150,6 +1169,33 @@ private boolean alreadyAccepted(MessageId id) { */ private final static long REQUEUE_DELAY = 500; private static final int MAX_REQUEUE = 60; // 30 sec. + + /** + * Get the FloodfillNetworkDatabaseFacade for this runner. This is the client + * netDb if the router is configured to use subDbs, or the main netDb if the + * router is configured to use a monolithic netDb. + * + * If neither a client netDb or the main netDb is available, it will return null. + * This should be impossible. + * If you get the `getFloodfillNetworkDatabaseFacade is null for runner` warning, + * the main netDb will be returned instead. If the main netDb is null, then null + * will be returned. 
+ * + * @return _floodfillNetworkDatabaseFacade + * @since 0.9.60 + */ + public FloodfillNetworkDatabaseFacade getFloodfillNetworkDatabaseFacade() { + if (!_context.netDbSegmentor().useSubDbs()) + return _context.netDb(); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("getFloodfillNetworkDatabaseFacade is getting the subDb for dbid: " + this.getDestHash()); + if (_floodfillNetworkDatabaseFacade == null) { + if (_log.shouldLog(Log.ERROR)) + _log.error("getFloodfillNetworkDatabaseFacade is null for runner", new Exception()); + return _context.netDb(); + } + return this._floodfillNetworkDatabaseFacade; + } private class MessageDeliveryStatusUpdate extends JobImpl { private final SessionId _sessId; diff --git a/router/java/src/net/i2p/router/client/ClientManager.java b/router/java/src/net/i2p/router/client/ClientManager.java index a8bc8f46ee..5bf2b17564 100644 --- a/router/java/src/net/i2p/router/client/ClientManager.java +++ b/router/java/src/net/i2p/router/client/ClientManager.java @@ -43,6 +43,8 @@ import net.i2p.router.Job; import net.i2p.router.JobImpl; import net.i2p.router.RouterContext; +import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade; +import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseSegmentor; import net.i2p.util.ConcurrentHashSet; import net.i2p.util.I2PThread; import net.i2p.util.Log; @@ -771,6 +773,51 @@ public void messageReceived(ClientMessage msg) { (new HandleJob(msg)).runJob(); } + /** + * get the FloodfillNetworkDatabaseFacade associated with a particular client destination. + * This is inside the runner, so it won't be there if the runner isn't ready. 
+ * + * @param destHash destination hash associated with the client whose subDb we're looking for + * @return may be null if it does not exist + */ + public FloodfillNetworkDatabaseFacade getClientFloodfillNetworkDatabaseFacade(Hash destHash) { + if (destHash != null) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Getting subDb for desthash: " + destHash); + ClientConnectionRunner runner = getRunner(destHash); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("ClientManager got a runner in getClientFloodfillNetworkDatabaseFacade for " + destHash); + return runner.getFloodfillNetworkDatabaseFacade(); + } + return null; + } + + /** + * get all of the FloodfillNetworkDatabaseFacades for all of the clients. + * + * @return non-null + */ + public Set getClientFloodfillNetworkDatabaseFacades() { + Set rv = new HashSet(); + for (ClientConnectionRunner runner : _runners.values()) { + if (runner != null) + rv.add(runner.getFloodfillNetworkDatabaseFacade()); + } + return rv; + } + + /** + * get all the primary hashes for all the clients and return them as a set + * + * @return + */ + public Set getPrimaryHashes() { + Set rv = new HashSet(); + for (ClientConnectionRunner runner : _runners.values()) + rv.add(runner.getDestHash()); + return rv; + } + private class HandleJob extends JobImpl { private final ClientMessage _msg; diff --git a/router/java/src/net/i2p/router/client/ClientManagerFacadeImpl.java b/router/java/src/net/i2p/router/client/ClientManagerFacadeImpl.java index 629d3e5e07..f73af84bb5 100644 --- a/router/java/src/net/i2p/router/client/ClientManagerFacadeImpl.java +++ b/router/java/src/net/i2p/router/client/ClientManagerFacadeImpl.java @@ -28,6 +28,7 @@ import net.i2p.router.ClientMessage; import net.i2p.router.Job; import net.i2p.router.RouterContext; +import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade; import net.i2p.util.Log; /** @@ -290,4 +291,45 @@ public void unregisterMetaDest(Destination dest) { if (_manager != null)
_manager.unregisterMetaDest(dest); } + + /** + * get the FloodfillNetworkDatabaseFacade associated with a particular client destination. + * This is inside the runner, so it won't be there if the runner isn't ready. + * + * @param destHash destination hash associated with the client whose subDb we're looking for + * @return + */ + @Override + public FloodfillNetworkDatabaseFacade getClientFloodfillNetworkDatabaseFacade(Hash destHash) { + if (_manager != null) + return _manager.getClientFloodfillNetworkDatabaseFacade(destHash); + else + return null; + } + + /** + * get all of the FloodfillNetworkDatabaseFacades for all of the clients. + * + * @return + */ + @Override + public Set getClientFloodfillNetworkDatabaseFacades() { + if (_manager != null) + return _manager.getClientFloodfillNetworkDatabaseFacades(); + else + return Collections.emptySet(); + } + + /** + * get all the primary hashes for all the clients and return them as a set + * + * @return + */ + @Override + public Set getPrimaryHashes() { + if (_manager != null) + return _manager.getPrimaryHashes(); + else + return Collections.emptySet(); + } } diff --git a/router/java/src/net/i2p/router/client/ClientMessageEventListener.java b/router/java/src/net/i2p/router/client/ClientMessageEventListener.java index 9d9fb4c821..b65aac84fb 100644 --- a/router/java/src/net/i2p/router/client/ClientMessageEventListener.java +++ b/router/java/src/net/i2p/router/client/ClientMessageEventListener.java @@ -711,13 +711,13 @@ protected void handleCreateLeaseSet(CreateLeaseSetMessage message) { } if (_log.shouldDebug()) _log.debug("Publishing: " + ls); - _context.clientNetDb(_runner.getDestHash()).publish(ls); + _runner.getFloodfillNetworkDatabaseFacade().publish(ls); if (type == DatabaseEntry.KEY_TYPE_ENCRYPTED_LS2) { // store the decrypted ls also EncryptedLeaseSet encls = (EncryptedLeaseSet) ls; if (_log.shouldDebug()) _log.debug("Storing decrypted: " + encls.getDecryptedLeaseSet()); - 
_context.clientNetDb(dest.getHash()).store(dest.getHash(), encls.getDecryptedLeaseSet()); + _runner.getFloodfillNetworkDatabaseFacade().store(dest.getHash(), encls.getDecryptedLeaseSet()); } } catch (IllegalArgumentException iae) { if (_log.shouldLog(Log.ERROR)) @@ -861,9 +861,9 @@ private void handleBlindingInfo(BlindingInfoMessage message) { _log.warn("Unsupported BlindingInfo type: " + message); return; } - BlindData obd = _context.clientNetDb(_runner.getDestHash()).getBlindData(spk); + BlindData obd = _runner.getFloodfillNetworkDatabaseFacade().getBlindData(spk); if (obd == null) { - _context.clientNetDb(_runner.getDestHash()).setBlindData(bd); + _runner.getFloodfillNetworkDatabaseFacade().setBlindData(bd); if (_log.shouldWarn()) _log.warn("New: " + bd); } else { @@ -884,7 +884,7 @@ private void handleBlindingInfo(BlindingInfoMessage message) { return; } } - _context.clientNetDb(_runner.getDestHash()).setBlindData(bd); + _runner.getFloodfillNetworkDatabaseFacade().setBlindData(bd); if (_log.shouldWarn()) _log.warn("Updated: " + bd); } else { @@ -893,7 +893,7 @@ private void handleBlindingInfo(BlindingInfoMessage message) { if (nexp > oexp) { obd.setExpiration(nexp); // to force save at shutdown - _context.clientNetDb(_runner.getDestHash()).setBlindData(obd); + _runner.getFloodfillNetworkDatabaseFacade().setBlindData(obd); if (_log.shouldWarn()) _log.warn("Updated expiration: " + obd); } else { diff --git a/router/java/src/net/i2p/router/client/LookupDestJob.java b/router/java/src/net/i2p/router/client/LookupDestJob.java index 90000b53f1..7bdfc50339 100644 --- a/router/java/src/net/i2p/router/client/LookupDestJob.java +++ b/router/java/src/net/i2p/router/client/LookupDestJob.java @@ -91,11 +91,7 @@ public LookupDestJob(RouterContext context, ClientConnectionRunner runner, try { bd = Blinding.decode(context, b); SigningPublicKey spk = bd.getUnblindedPubKey(); - BlindData bd2; - if (_fromLocalDest == null) - bd2 = getContext().netDb().getBlindData(spk); - else - 
bd2 = getContext().clientNetDb(_fromLocalDest).getBlindData(spk); + BlindData bd2 = _runner.getFloodfillNetworkDatabaseFacade().getBlindData(spk); if (bd2 != null) { // BlindData from database may have privkey or secret // check if we need it but don't have it @@ -114,7 +110,7 @@ public LookupDestJob(RouterContext context, ClientConnectionRunner runner, long exp = now + ((bd.getAuthRequired() || bd.getSecretRequired()) ? 365*24*60*60*1000L : 90*24*68*60*1000L); bd.setExpiration(exp); - getContext().clientNetDb(_fromLocalDest).setBlindData(bd); + _runner.getFloodfillNetworkDatabaseFacade().setBlindData(bd); } h = bd.getBlindedHash(); if (_log.shouldDebug()) @@ -189,7 +185,7 @@ else if (fail1) if (timeout > 1500) timeout -= 500; // TODO tell router this is an encrypted lookup, skip 38 or earlier ffs? - getContext().clientNetDb(_fromLocalDest).lookupDestination(_hash, done, timeout, _fromLocalDest); + _runner.getFloodfillNetworkDatabaseFacade().lookupDestination(_hash, done, timeout, _fromLocalDest); } else { // blinding decode fail returnFail(HostReplyMessage.RESULT_DECRYPTION_FAILURE); @@ -208,10 +204,10 @@ public DoneJob(RouterContext enclosingContext) { } public String getName() { return "LeaseSet Lookup Reply to Client"; } public void runJob() { - Destination dest = getContext().clientNetDb(_fromLocalDest).lookupDestinationLocally(_hash); + Destination dest = _runner.getFloodfillNetworkDatabaseFacade().lookupDestinationLocally(_hash); if (dest == null && _blindData != null) { // TODO store and lookup original hash instead - LeaseSet ls = getContext().clientNetDb(_fromLocalDest).lookupLeaseSetLocally(_hash); + LeaseSet ls = _runner.getFloodfillNetworkDatabaseFacade().lookupLeaseSetLocally(_hash); if (ls != null && ls.getType() == DatabaseEntry.KEY_TYPE_ENCRYPTED_LS2) { // already decrypted EncryptedLeaseSet encls = (EncryptedLeaseSet) ls; diff --git a/router/java/src/net/i2p/router/dummy/DummyClientManagerFacade.java 
b/router/java/src/net/i2p/router/dummy/DummyClientManagerFacade.java index ba40badd00..735059fc5d 100644 --- a/router/java/src/net/i2p/router/dummy/DummyClientManagerFacade.java +++ b/router/java/src/net/i2p/router/dummy/DummyClientManagerFacade.java @@ -8,6 +8,9 @@ * */ +import java.util.Collections; +import java.util.Set; + import net.i2p.crypto.SessionKeyManager; import net.i2p.data.Destination; import net.i2p.data.Hash; @@ -18,6 +21,7 @@ import net.i2p.router.ClientMessage; import net.i2p.router.Job; import net.i2p.router.RouterContext; +import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade; /** * Manage all interactions with clients @@ -49,6 +53,14 @@ public void messageDeliveryStatusUpdate(Destination fromDest, MessageId id, long public SessionKeyManager getClientSessionKeyManager(Hash _dest) { return null; } public void requestLeaseSet(Hash dest, LeaseSet set) {} - + public FloodfillNetworkDatabaseFacade getClientFloodfillNetworkDatabaseFacade(Hash dbid) { + return null; + } + public Set getClientFloodfillNetworkDatabaseFacades() { + return Collections.emptySet(); + } + public Set getPrimaryHashes() { + return Collections.emptySet(); + } } diff --git a/router/java/src/net/i2p/router/dummy/DummyNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/dummy/DummyNetworkDatabaseFacade.java index c18298e1d0..52de05923e 100644 --- a/router/java/src/net/i2p/router/dummy/DummyNetworkDatabaseFacade.java +++ b/router/java/src/net/i2p/router/dummy/DummyNetworkDatabaseFacade.java @@ -23,6 +23,7 @@ import net.i2p.router.Job; import net.i2p.router.RouterContext; import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade; +import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseSegmentor; import net.i2p.router.networkdb.kademlia.KademliaNetworkDatabaseFacade; import net.i2p.router.networkdb.kademlia.SegmentedNetworkDatabaseFacade; @@ -33,16 +34,12 @@ public class DummyNetworkDatabaseFacade extends SegmentedNetworkDatabaseFacade { 
public DummyNetworkDatabaseFacade(RouterContext ctx) { super(ctx); - _fndb = new FloodfillNetworkDatabaseFacade(ctx, "dummy"); + _fndb = new FloodfillNetworkDatabaseFacade(ctx, FloodfillNetworkDatabaseSegmentor.MAIN_DBID); _fndb.startup(); _routers = Collections.synchronizedMap(new HashMap()); _context = ctx; } - public FloodfillNetworkDatabaseFacade getSubNetDB(String dbid){ - return null; - } - public FloodfillNetworkDatabaseFacade getSubNetDB(Hash dbid){ return null; } @@ -98,11 +95,6 @@ public LeaseSet lookupLeaseSetHashIsClient(Hash key) { throw new UnsupportedOperationException("Unimplemented method 'lookupLeaseSetHashIsClient'"); } - @Override - public LeaseSet lookupLeaseSetLocally(Hash key, String dbid) { - throw new UnsupportedOperationException("Unimplemented method 'lookupLeaseSetLocally'"); - } - @Override public FloodfillNetworkDatabaseFacade mainNetDB() { return _fndb; @@ -113,26 +105,11 @@ public FloodfillNetworkDatabaseFacade multiHomeNetDB() { return _fndb; } - @Override - public FloodfillNetworkDatabaseFacade clientNetDB(String id) { - return _fndb; - } - @Override public FloodfillNetworkDatabaseFacade clientNetDB(Hash id) { return _fndb; } - @Override - public String getDbidByHash(Hash clientKey) { - throw new UnsupportedOperationException("Unimplemented method 'lookupLeaseSetHashIsClient'"); - } - - @Override - public List getClients() { - throw new UnsupportedOperationException("Unimplemented method 'getClients'"); - } - @Override public Set getSubNetDBs(){ throw new UnsupportedOperationException("Unimplemented method 'getSubNetDBs'"); diff --git a/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java b/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java index 0e8365b167..262ea9a538 100644 --- a/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java +++ b/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java @@ -147,7 +147,10 @@ public void runJob() { 
// Only send it out if it is in our estimated keyspace. // For this, we do NOT use their dontInclude list as it can't be trusted // (i.e. it could mess up the closeness calculation) - LeaseSet possibleMultihomed = getContext().multihomeNetDb().lookupLeaseSetLocally(searchKey); + LeaseSet possibleMultihomed = null; + if (getContext().netDbSegmentor().useSubDbs()) { + possibleMultihomed = getContext().multihomeNetDb().lookupLeaseSetLocally(searchKey); + } Set closestHashes = getContext().netDb().findNearestRouters(searchKey, CLOSENESS_THRESHOLD, null); if (weAreClosest(closestHashes)) { @@ -162,7 +165,7 @@ public void runJob() { _log.info("We have local LS " + searchKey + ", answering query, in our keyspace"); getContext().statManager().addRateData("netDb.lookupsMatchedLocalClosest", 1); sendData(searchKey, ls, fromKey, toTunnel); - } else if (possibleMultihomed != null) { + } else if (getContext().netDbSegmentor().useSubDbs() && possibleMultihomed != null) { // If it's in the possibleMultihomed cache, then it was definitely stored to us meaning it is effectively // always recievedAsPublished. No need to decide whether or not to answer the request like above, just // answer it so it doesn't look different from other stores. @@ -181,8 +184,11 @@ public void runJob() { sendClosest(searchKey, routerHashSet, fromKey, toTunnel); } } else { - LeaseSet possibleMultihomed = getContext().multihomeNetDb().lookupLeaseSetLocally(searchKey); - if (possibleMultihomed != null) { + LeaseSet possibleMultihomed = null; + if (getContext().netDbSegmentor().useSubDbs()) { + possibleMultihomed = getContext().multihomeNetDb().lookupLeaseSetLocally(searchKey); + } + if ((getContext().netDbSegmentor().useSubDbs()) && possibleMultihomed != null) { if (possibleMultihomed.getReceivedAsPublished()) { if (_log.shouldLog(Log.INFO)) _log.info("We have local LS " + searchKey + " in our multihomes cache meaning it was stored to us. 
Answering query with the stored LS."); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java index 01c8953fed..cffc1f16a9 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java @@ -66,10 +66,13 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad private static final long NEXT_RKEY_LS_ADVANCE_TIME = 10*60*1000; private static final int NEXT_FLOOD_QTY = 2; - public FloodfillNetworkDatabaseFacade(RouterContext context, String dbid) { + public FloodfillNetworkDatabaseFacade(RouterContext context) { + this(context, FloodfillNetworkDatabaseSegmentor.MAIN_DBID); + } + public FloodfillNetworkDatabaseFacade(RouterContext context, Hash dbid) { super(context, dbid); _activeFloodQueries = new HashMap(); - _verifiesInProgress = new ConcurrentHashSet(8); + _verifiesInProgress = new ConcurrentHashSet(8); long[] rate = new long[] { 60*60*1000L }; _context.statManager().createRequiredRateStat("netDb.successTime", "Time for successful lookup (ms)", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l }); @@ -123,7 +126,7 @@ public synchronized void startup() { @Override protected void createHandlers() { // Only initialize the handlers for the flooodfill netDb. 
- if (super._dbid.equals(FloodfillNetworkDatabaseSegmentor.MAIN_DBID)) { + if (!isClientDb()) { if (_log.shouldInfo()) _log.info("[dbid: " + super._dbid + "] Initializing the message handlers"); _context.inNetMessagePool().registerHandlerJobBuilder(DatabaseLookupMessage.MESSAGE_TYPE, new FloodfillDatabaseLookupMessageHandler(_context, this)); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseSegmentor.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseSegmentor.java index e650618a59..bd42337a42 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseSegmentor.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseSegmentor.java @@ -1,25 +1,16 @@ package net.i2p.router.networkdb.kademlia; -import java.io.IOException; -import java.io.Writer; -//import java.rmi.dgc.Lease; import java.util.ArrayList; -import java.util.HashMap; +import java.util.Collections; import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Set; import net.i2p.data.BlindData; -import net.i2p.data.DatabaseEntry; -import net.i2p.data.Destination; import net.i2p.data.Hash; import net.i2p.data.LeaseSet; import net.i2p.data.SigningPublicKey; -import net.i2p.data.TunnelId; import net.i2p.data.router.RouterInfo; -import net.i2p.router.Job; import net.i2p.router.RouterContext; -import net.i2p.router.networkdb.reseed.ReseedChecker; import net.i2p.util.Log; /** @@ -61,13 +52,11 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade { protected final Log _log; private RouterContext _context; - private Map _subDBs = new HashMap(); - public static final String MAIN_DBID = "main"; - public static final String MULTIHOME_DBID = "clients_multihome"; - private static final String EXPLORATORY_DBID = "clients_exploratory"; + private static final String PROP_NETDB_ISOLATION = "router.netdb.isolation"; + public static final 
Hash MAIN_DBID = null; + public static final Hash MULTIHOME_DBID = Hash.FAKE_HASH; private final FloodfillNetworkDatabaseFacade _mainDbid; private final FloodfillNetworkDatabaseFacade _multihomeDbid; - private final FloodfillNetworkDatabaseFacade _exploratoryDbid; /** * Construct a new FloodfillNetworkDatabaseSegmentor with the given @@ -83,7 +72,10 @@ public FloodfillNetworkDatabaseSegmentor(RouterContext context) { _context = context; _mainDbid = new FloodfillNetworkDatabaseFacade(_context, MAIN_DBID); _multihomeDbid = new FloodfillNetworkDatabaseFacade(_context, MULTIHOME_DBID); - _exploratoryDbid = new FloodfillNetworkDatabaseFacade(_context, EXPLORATORY_DBID); + } + + public boolean useSubDbs() { + return _context.getProperty(PROP_NETDB_ISOLATION, true); } /** @@ -91,79 +83,55 @@ public FloodfillNetworkDatabaseSegmentor(RouterContext context) { * If the ID is null, the main database is returned. * * @param id the ID of the FloodfillNetworkDatabaseFacade object to retrieve - * @return the FloodfillNetworkDatabaseFacade object corresponding to the ID + * @return the FloodfillNetworkDatabaseFacade object corresponding to the ID or null if it does not exist. */ @Override protected FloodfillNetworkDatabaseFacade getSubNetDB(Hash id) { - if (id == null) - return getSubNetDB(MAIN_DBID); - return getSubNetDB(id.toBase32()); + if (!useSubDbs()) + return _mainDbid; + return _context.clientManager().getClientFloodfillNetworkDatabaseFacade(id); } /** - * Retrieves the FloodfillNetworkDatabaseFacade object for the specified ID string. 
- * - * @param id the ID of the FloodfillNetworkDatabaseFacade object to retrieve - * @return the FloodfillNetworkDatabaseFacade object for the specified ID - * - */ - @Override - protected FloodfillNetworkDatabaseFacade getSubNetDB(String id) { - if (id == null || id.isEmpty() || id.equals(MAIN_DBID)) - return mainNetDB(); - if (id.equals(MULTIHOME_DBID)) - return multiHomeNetDB(); - if (id.equals(EXPLORATORY_DBID)) - return clientNetDB(); - - if (id.endsWith(".i2p")) { - if (!id.startsWith("clients_")) - id = "clients_" + id; - } - - FloodfillNetworkDatabaseFacade subdb = _subDBs.get(id); - if (subdb == null) { - subdb = new FloodfillNetworkDatabaseFacade(_context, id); - _subDBs.put(id, subdb); - subdb.startup(); - subdb.createHandlers(); - } - return subdb; + * If we are floodfill, turn it off and tell everybody for the _mainDbid and the + * _multihomeDbid + * + * @since 0.9.60 + * + */ + public synchronized void shutdown() { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("shutdown called from FNDS, shutting down main and multihome db"); + _mainDbid.shutdown(); + if (useSubDbs()) + _multihomeDbid.shutdown(); } /** - * If we are floodfill, turn it off and tell everybody. - * Shut down all known subDbs. 
+ * Start up the floodfill for the _mainDbid and the _multihomeDbid * * @since 0.9.60 * */ - public synchronized void shutdown() { - _mainDbid.shutdown(); - _multihomeDbid.shutdown(); - // shut down every entry in _subDBs - for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("(dbid: " + subdb._dbid - + ") Shutting down all remaining sub-netDbs", - new Exception()); - subdb.shutdown(); - } + public synchronized void startup() { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("startup called from FNDS, starting up main and multihome db"); + _mainDbid.startup(); + if (useSubDbs()) + _multihomeDbid.startup(); } /** * list of the RouterInfo objects for all known peers; * * @since 0.9.60 - * + * @return non-null */ public List getKnownRouterData() { List rv = new ArrayList(); for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) { if (_log.shouldLog(Log.DEBUG)) - _log.debug("(dbid: " + subdb._dbid - + ") Called from FNDS, will be combined with all other subDbs", - new Exception()); + _log.debug("getKnownRouterData Called from FNDS,"+subdb._dbid+", will be combined with all other subDbs"); rv.addAll(subdb.getKnownRouterData()); } return rv; @@ -175,14 +143,13 @@ public List getKnownRouterData() { * List is not sorted and not shuffled. * * @since 0.9.60 + * @return non-null */ public List getFloodfillPeers() { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("getFloodfillPeers collecting all floodfill peers across all subDbs"); List peers = new ArrayList(); for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("(dbid: " + subdb._dbid - + ") Deprecated! Arbitrary selection of this subDb", - new Exception()); peers.addAll(subdb.getFloodfillPeers()); } return peers; @@ -193,12 +160,12 @@ public List getFloodfillPeers() { * but the client dbid is not. * * @param key The LS key for client. 
+ * @return may be null * @since 0.9.60 */ @Override public LeaseSet lookupLeaseSetHashIsClient(Hash key) { - String dbid = matchDbid(key); - return lookupLeaseSetLocally(key, dbid); + return lookupLeaseSetLocally(key, null); } /** @@ -206,27 +173,21 @@ public LeaseSet lookupLeaseSetHashIsClient(Hash key) { * if a DBID is not provided, the clients will all be checked, and the * first value will be used. * + * @return may be null * @since 0.9.60 - * */ - @Override - protected LeaseSet lookupLeaseSetLocally(Hash key, String dbid) { - if (dbid == null || dbid.isEmpty()) { + //@Override + protected LeaseSet lookupLeaseSetLocally(Hash key, Hash dbid) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("lookupLeaseSetLocally on all subDbs: " + key.toBase32()); + if (dbid == null) { LeaseSet rv = null; - for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("(dbid: " + subdb._dbid - + ") Deprecated! Arbitrary selection of this subDb", - new Exception()); + for (FloodfillNetworkDatabaseFacade subdb : getClientSubNetDBs()) { rv = subdb.lookupLeaseSetLocally(key); if (rv != null) { return rv; } } - rv = this.lookupLeaseSetLocally(key, MAIN_DBID); - if (rv != null) { - return rv; - } } return this.getSubNetDB(dbid).lookupLeaseSetLocally(key); } @@ -235,11 +196,15 @@ protected LeaseSet lookupLeaseSetLocally(Hash key, String dbid) { * Check if all of the known subDbs are initialized * * @since 0.9.60 - * + * @return true if the mainNetdb and all known client netDbs are initialized */ public boolean isInitialized() { - boolean rv = mainNetDB().isInitialized(); - for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) { + if (_mainDbid == null) + return false; + boolean rv = _mainDbid.isInitialized(); + if (!rv) + return rv; + for (FloodfillNetworkDatabaseFacade subdb : getClientSubNetDBs()) { rv = subdb.isInitialized(); if (!rv) { break; @@ -248,37 +213,18 @@ public boolean isInitialized() { return rv; } - /** - * list of the 
RouterInfo objects for all known peers - * - * @since 0.9.60 - * - */ - @Override - public Set getRouters() { - Set rv = new HashSet<>(); - for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("(dbid: " + subdb._dbid - + ") Deprecated! Arbitrary selection of this subDb", - new Exception()); - rv.addAll(subdb.getRouters()); - } - return rv; - } - - - /** * list of the RouterInfo objects for all known peers known to clients(in subDbs) only * * @since 0.9.60 - * + * @return non-null */ public Set getRoutersKnownToClients() { Set rv = new HashSet<>(); - for (String key : getClients()) { - rv.addAll(this.getSubNetDB(key).getRouters()); + for (FloodfillNetworkDatabaseFacade subdb : getClientSubNetDBs()) { + Set rv2 = subdb.getRouters(); + if (rv2 != null) + rv.addAll(rv2); } return rv; } @@ -287,29 +233,14 @@ public Set getRoutersKnownToClients() { * list of the LeaseSet objects for all known peers known to clients(in subDbs) only * * @since 0.9.60 - * + * @return non-null */ public Set getLeasesKnownToClients() { Set rv = new HashSet<>(); - for (String key : getClients()) { - rv.addAll(this.getSubNetDB(key).getLeases()); - } - return rv; - } - - /** - * list all of the dbids of all known client subDbs - * - * @since 0.9.60 - * - */ - public List getClients() { - List rv = new ArrayList(); - for (String key : _subDBs.keySet()) { - if (key != null && !key.isEmpty()) { - if (key.startsWith("client")) - rv.add(key); - } + for (FloodfillNetworkDatabaseFacade fndf : getClientSubNetDBs()) { + Set rv2 = fndf.getLeases(); + if (rv2 != null) + rv.addAll(rv2); } return rv; } @@ -318,7 +249,7 @@ public List getClients() { * get the main netDb, which is the one we will use if we are a floodfill * * @since 0.9.60 - * + * @return may be null */ @Override public FloodfillNetworkDatabaseFacade mainNetDB() { @@ -329,69 +260,55 @@ public FloodfillNetworkDatabaseFacade mainNetDB() { * get the multiHome netDb, which is especially for 
handling multihomes * * @since 0.9.60 - * + * @return may be null */ @Override public FloodfillNetworkDatabaseFacade multiHomeNetDB() { return _multihomeDbid; } - /** - * get the client netDb for the given id. - * Will return the "exploratory(default client)" netDb if - * the dbid is null. - * - * @since 0.9.60 - * - */ - @Override - public FloodfillNetworkDatabaseFacade clientNetDB(String id) { - if (id == null || id.isEmpty()) - return clientNetDB(); - return this.getSubNetDB(id); - } - /** * get the client netDb for the given id * Will return the "exploratory(default client)" netDb if * the dbid is null. * * @since 0.9.60 - * + * @return may be null if the client netDb does not exist */ @Override public FloodfillNetworkDatabaseFacade clientNetDB(Hash id) { - if (id != null) - return getSubNetDB(id.toBase32()); - return clientNetDB(); - } - - /** - * get the default client(exploratory) netDb - * - * @since 0.9.60 - * - */ - public FloodfillNetworkDatabaseFacade clientNetDB() { - return _exploratoryDbid; + if (_log.shouldDebug()) + _log.debug("looked up clientNetDB: " + id); + if (!useSubDbs()) + return _mainDbid; + if (id != null){ + FloodfillNetworkDatabaseFacade fndf = getSubNetDB(id); + if (fndf != null) + return fndf; + } + return mainNetDB(); } /** - * look up the dbid of the client with the given signing public key + * look up the dbid of the client or clients with the given signing + * public key * * @since 0.9.60 - * + * @return non-null */ @Override - public List lookupClientBySigningPublicKey(SigningPublicKey spk) { - List rv = new ArrayList<>(); - for (String subdb : getClients()) { + public List lookupClientBySigningPublicKey(SigningPublicKey spk) { + List rv = new ArrayList<>(); + for (Hash subdb : _context.clientManager().getPrimaryHashes()) { + FloodfillNetworkDatabaseFacade fndf = _context.clientManager().getClientFloodfillNetworkDatabaseFacade(subdb); + if (fndf == null) + continue; // if (subdb.startsWith("clients_")) // TODO: see if we can 
access only one subDb at a time when we need // to look up a client by SPK. We mostly need this for managing blinded // and encrypted keys in the Keyring Config UI page. See also // ConfigKeyringHelper - BlindData bd = _subDBs.get(subdb).getBlindData(spk); + BlindData bd = fndf.getBlindData(spk); if (bd != null) { rv.add(subdb); } @@ -400,44 +317,44 @@ public List lookupClientBySigningPublicKey(SigningPublicKey spk) { } /** - * Public helper to return the dbid that is associated with the - * supplied client key. - * - * @param clientKey The LS key of the subDb context + * get all the subDbs and return them in a Set. This includes the main netDb + * and the possible-multihomes netDb + * * @since 0.9.60 + * @return non-null */ @Override - public String getDbidByHash(Hash clientKey) { - return matchDbid(clientKey); - } - - /** - * Return the dbid that is associated with the supplied client LS key - * - * @param clientKey The LS key of the subDb context - * @since 0.9.60 - */ - private String matchDbid(Hash clientKey) { - for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) { - if (subdb.matchClientKey(clientKey)) - return subdb._dbid; + public Set getSubNetDBs() { + if (!_mainDbid.isInitialized()) + return Collections.emptySet(); + Set rv = new HashSet<>(); + if (!useSubDbs()) { + rv.add(_mainDbid); + return rv; } - return null; + rv.add(_mainDbid); + rv.add(multiHomeNetDB()); + rv.addAll(_context.clientManager().getClientFloodfillNetworkDatabaseFacades()); + return rv; } /** - * get all the subDbs and return them in a Set. + * get all the subDbs and return them in a Set. 
This only includes subDbs associated + * with specific clients, unless subDbs are disabled in which case it only contains the + * main netDB * * @since 0.9.60 - * + * @return non-null */ - @Override - public Set getSubNetDBs() { + private Set getClientSubNetDBs() { + if (!_mainDbid.isInitialized()) + return Collections.emptySet(); Set rv = new HashSet<>(); - rv.add(mainNetDB()); - rv.add(multiHomeNetDB()); - rv.add(clientNetDB()); - rv.addAll(_subDBs.values()); + if (!useSubDbs()) { + rv.add(_mainDbid); + return rv; + } + rv.addAll(_context.clientManager().getClientFloodfillNetworkDatabaseFacades()); return rv; } @@ -445,12 +362,12 @@ public Set getSubNetDBs() { * list of the BlindData objects for all known clients * * @since 0.9.60 - * + * @return non-null */ @Override public List getLocalClientsBlindData() { List rv = new ArrayList<>(); - for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) { + for (FloodfillNetworkDatabaseFacade subdb : getClientSubNetDBs()) { rv.addAll(subdb.getBlindData()); } return rv; diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillVerifyStoreJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillVerifyStoreJob.java index 3b9dcead84..36d08fc03a 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillVerifyStoreJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillVerifyStoreJob.java @@ -460,7 +460,7 @@ public void runJob() { private void resend() { // It's safe to check the default netDb first, but if the lookup is for // a client, nearly all RI is expected to be found in the FF netDb. 
- DatabaseEntry ds = getContext().netDbSegmentor().getSubNetDB(_facade._dbid).lookupLocally(_key); + DatabaseEntry ds = _facade.lookupLocally(_key); if ((ds == null) && _facade.isClientDb() && _isRouterInfo) // It's safe to check the floodfill netDb for RI ds = getContext().netDb().lookupLocally(_key); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseStoreMessageJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseStoreMessageJob.java index ea37d12757..766d946992 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseStoreMessageJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseStoreMessageJob.java @@ -134,12 +134,7 @@ public void runJob() { if (!ls.getReceivedAsReply()) ls.setReceivedAsPublished(); if (_facade.isClientDb()) - if (_facade.matchClientKey(key)) - // In the client subDb context, the only local key to worry about - // is the key for this client. 
- blockStore = true; - else - blockStore = false; + blockStore = false; else if (getContext().clientManager().isLocal(key)) // Non-client context if (_facade.floodfillEnabled() && (_fromHash != null)) @@ -155,7 +150,8 @@ else if (getContext().clientManager().isLocal(key)) getContext().statManager().addRateData("netDb.storeLocalLeaseSetAttempt", 1, 0); // throw rather than return, so that we send the ack below (prevent easy attack) dontBlamePeer = true; - getContext().multihomeNetDb().store(key, ls); + if (getContext().netDbSegmentor().useSubDbs()) + getContext().multihomeNetDb().store(key, ls); throw new IllegalArgumentException("(dbid: " + _facade._dbid + ") Peer attempted to store local leaseSet: " + key.toBase32()); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java index bc0e50d4f7..060553ae1f 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java @@ -80,7 +80,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad private NegativeLookupCache _negativeCache; protected final int _networkID; private final BlindCache _blindCache; - protected final String _dbid; + protected final Hash _dbid; private Hash _localKey; /** @@ -172,7 +172,7 @@ void searchComplete(Hash key) { private static final int BUCKET_SIZE = 24; private static final int KAD_B = 4; - public KademliaNetworkDatabaseFacade(RouterContext context, String dbid) { + public KademliaNetworkDatabaseFacade(RouterContext context, Hash dbid) { _context = context; _dbid = dbid; _log = _context.logManager().getLog(getClass()); @@ -297,8 +297,8 @@ public void rescan() { String getDbDir() { if (_dbDir == null) { String dbDir = _context.getProperty(PROP_DB_DIR, DEFAULT_DB_DIR); - if 
(!_dbid.equals(FloodfillNetworkDatabaseSegmentor.MAIN_DBID) && _dbid != null) { - File subDir = new File(dbDir, _dbid); + if (_dbid != FloodfillNetworkDatabaseSegmentor.MAIN_DBID) { + File subDir = new File(dbDir, _dbid.toBase32()); dbDir = subDir.toString(); } return dbDir; @@ -306,12 +306,37 @@ String getDbDir() { return _dbDir; } + /** + * Check if the database is a client DB. + * + * @return true if the database is a client DB, false otherwise + * @since 0.9.60 + */ public boolean isClientDb() { - return _dbid.startsWith("clients_"); + // This is a null check in disguise, don't use .equals() here. + // FNDS.MAIN_DBID is always null. and if _dbid is also null it is not a client Db + if (_dbid == FloodfillNetworkDatabaseSegmentor.MAIN_DBID) + return false; + if (_dbid.equals(FloodfillNetworkDatabaseSegmentor.MULTIHOME_DBID)) + return false; + return true; } + + /** + * Checks if the current database is a multihome database. + * + * @return true if the current database is a multihome database, false otherwise. + * @since 0.9.60 + */ public boolean isMultihomeDb() { - return _dbid.equals(FloodfillNetworkDatabaseSegmentor.MULTIHOME_DBID); + // This is a null check in disguise, don't use .equals() here. + // FNDS.MAIN_DBID is always null, and if _dbid is null it is not the multihome Db + if (_dbid == FloodfillNetworkDatabaseSegmentor.MAIN_DBID) + return false; + if (_dbid.equals(FloodfillNetworkDatabaseSegmentor.MULTIHOME_DBID)) + return true; + return false; } public synchronized void startup() { @@ -357,27 +382,29 @@ public synchronized void startup() { } if (!QUIET) { - // fill the search queue with random keys in buckets that are too small - // Disabled since KBucketImpl.generateRandomKey() is b0rked, - // and anyway, we want to search for a completely random key, - // not a random key for a particular kbucket. 
- // _context.jobQueue().addJob(new ExploreKeySelectorJob(_context, this)); - if (_exploreJob == null) - _exploreJob = new StartExplorersJob(_context, this); - // fire off a group of searches from the explore pool - // Don't start it right away, so we don't send searches for random keys - // out our 0-hop exploratory tunnels (generating direct connections to - // one or more floodfill peers within seconds of startup). - // We're trying to minimize the ff connections to lessen the load on the - // floodfills, and in any case let's try to build some real expl. tunnels first. - // No rush, it only runs every 30m. - _exploreJob.getTiming().setStartAfter(now + EXPLORE_JOB_DELAY); - _context.jobQueue().addJob(_exploreJob); + if (!isClientDb() && !isMultihomeDb()) { + // fill the search queue with random keys in buckets that are too small + // Disabled since KBucketImpl.generateRandomKey() is b0rked, + // and anyway, we want to search for a completely random key, + // not a random key for a particular kbucket. + // _context.jobQueue().addJob(new ExploreKeySelectorJob(_context, this)); + if (_exploreJob == null) + _exploreJob = new StartExplorersJob(_context, this); + // fire off a group of searches from the explore pool + // Don't start it right away, so we don't send searches for random keys + // out our 0-hop exploratory tunnels (generating direct connections to + // one or more floodfill peers within seconds of startup). + // We're trying to minimize the ff connections to lessen the load on the + // floodfills, and in any case let's try to build some real expl. tunnels first. + // No rush, it only runs every 30m. 
+ _exploreJob.getTiming().setStartAfter(now + EXPLORE_JOB_DELAY); + _context.jobQueue().addJob(_exploreJob); + } } else { _log.warn("Operating in quiet mode - not exploring or pushing data proactively, simply reactively"); _log.warn("This should NOT be used in production"); } - if (_dbid == null || _dbid.equals(FloodfillNetworkDatabaseSegmentor.MAIN_DBID) || _dbid.isEmpty()) { + if (!isClientDb() && !isMultihomeDb()) { // periodically update and resign the router's 'published date', which basically // serves as a version Job plrij = new PublishLocalRouterInfoJob(_context); @@ -821,18 +848,24 @@ public void publish(LeaseSet localLeaseSet) throws IllegalArgumentException { _log.error("locally published leaseSet is not valid?", iae); throw iae; } - if (_localKey != null) { - if (!_localKey.equals(localLeaseSet.getHash())) - if (_log.shouldLog(Log.ERROR)) - _log.error("Error, the local LS hash (" - + _localKey + ") does not match the published hash (" - + localLeaseSet.getHash() + ")! This shouldn't happen!", - new Exception()); - } else { - // This will only happen once when the local LS is first published - _localKey = localLeaseSet.getHash(); - if (_log.shouldLog(Log.INFO)) - _log.info("Local client LS key initialized to: " + _localKey); + if (!_context.netDbSegmentor().useSubDbs()){ + String dbid = "main netDb"; + if (isClientDb()) { + dbid = "client netDb: " + _dbid; + } + if (_localKey != null) { + if (!_localKey.equals(localLeaseSet.getHash())) + if (_log.shouldLog(Log.ERROR)) + _log.error("[" + dbid + "]" + "Error, the local LS hash (" + + _localKey + ") does not match the published hash (" + + localLeaseSet.getHash() + ")! 
This shouldn't happen!", + new Exception()); + } else { + // This will only happen once when the local LS is first published + _localKey = localLeaseSet.getHash(); + if (_log.shouldLog(Log.INFO)) + _log.info("[" + dbid + "]" + "Local client LS key initialized to: " + _localKey); + } } if (!_context.clientManager().shouldPublishLeaseSet(h)) return; @@ -1040,31 +1073,18 @@ public LeaseSet store(Hash key, LeaseSet leaseSet) throws IllegalArgumentExcepti if (rv != null && rv.getEarliestLeaseDate() >= leaseSet.getEarliestLeaseDate()) { if (_log.shouldDebug()) _log.debug("Not storing older " + key); - // TODO: Determine if this deep equals is actually truly necessary as part of this test or if the date is actually enough - if (rv.equals(leaseSet)) { - if (_log.shouldDebug()) - _log.debug("Updating leaseSet found in Datastore " + key); - /** - DatabaseEntry.java note - * we used to just copy the flags here but due to concerns about crafted - * entries being used to "follow" a leaseSet from one context to another, - * i.e. sent to a client vs sent to a router. Copying the entire leaseSet, - * flags and all, limits the ability of the attacker craft leaseSet entries - * maliciously. - */ - _ds.put(key, leaseSet); - rv = (LeaseSet)_ds.get(key); - Hash to = leaseSet.getReceivedBy(); - if (to != null) { - rv.setReceivedBy(to); - } else if (leaseSet.getReceivedAsReply()) { - rv.setReceivedAsReply(); - } - if (leaseSet.getReceivedAsPublished()) { - rv.setReceivedAsPublished(); - } - return rv; - }// TODO: Is there any reason to do anything here, if the fields are somehow unequal? - // Like, is there any case where this is not true? I don't think it's possible for it to be. 
+ // if it hasn't changed, no need to do anything + // except copy over the flags + Hash to = leaseSet.getReceivedBy(); + if (to != null) { + rv.setReceivedBy(to); + } else if (leaseSet.getReceivedAsReply()) { + rv.setReceivedAsReply(); + } + if (leaseSet.getReceivedAsPublished()) { + rv.setReceivedAsPublished(); + } + return rv; } } catch (ClassCastException cce) { throw new IllegalArgumentException("Attempt to replace RI with " + leaseSet); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/PersistentDataStore.java b/router/java/src/net/i2p/router/networkdb/kademlia/PersistentDataStore.java index 7214e545ba..42df339c9c 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/PersistentDataStore.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/PersistentDataStore.java @@ -396,13 +396,7 @@ public void runJob() { public void wakeup() { requeue(0); } - - private void setNetDbReady() { - // Only the floodfill netDb needs to call Router::setNetDbReady() - if (_facade._dbid.equals(FloodfillNetworkDatabaseSegmentor.MAIN_DBID)) - _context.router().setNetDbReady(); - } - + private void readFiles() { int routerCount = 0; @@ -467,11 +461,11 @@ private void readFiles() { // This is enough to let i2ptunnel get started. // Do not set _initialized yet so we don't start rescanning. 
_setNetDbReady = true; - setNetDbReady(); + _context.router().setNetDbReady(); } else if (i == 500 && !_setNetDbReady) { // do this for faster systems also at 500 _setNetDbReady = true; - setNetDbReady(); + _context.router().setNetDbReady(); } } } @@ -479,35 +473,23 @@ private void readFiles() { if (!_initialized) { _initialized = true; - if (_facade.isClientDb()) { - _lastReseed = _context.clock().now(); - _setNetDbReady = true; - setNetDbReady(); - } else if (_facade.isMultihomeDb()) { - _lastReseed = _context.clock().now(); - _setNetDbReady = true; - setNetDbReady(); - } else if (_facade.reseedChecker().checkReseed(routerCount)) { + if (_facade.reseedChecker().checkReseed(routerCount)) { _lastReseed = _context.clock().now(); // checkReseed will call wakeup() when done and we will run again } else { _setNetDbReady = true; - setNetDbReady(); + _context.router().setNetDbReady(); } } else if (_lastReseed < _context.clock().now() - MIN_RESEED_INTERVAL) { int count = Math.min(routerCount, size()); - if (_facade.isClientDb()) { - _lastReseed = _context.clock().now(); - } else if (_facade.isMultihomeDb()) { - _lastReseed = _context.clock().now(); - } else if (count < MIN_ROUTERS) { + if (count < MIN_ROUTERS) { if (_facade.reseedChecker().checkReseed(count)) _lastReseed = _context.clock().now(); // checkReseed will call wakeup() when done and we will run again } else { if (!_setNetDbReady) { _setNetDbReady = true; - setNetDbReady(); + _context.router().setNetDbReady(); } } } else { diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/SearchUpdateReplyFoundJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/SearchUpdateReplyFoundJob.java index b20157efcf..3296cb63bc 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/SearchUpdateReplyFoundJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/SearchUpdateReplyFoundJob.java @@ -87,11 +87,11 @@ public void runJob() { try { if (entry.isRouterInfo()) { RouterInfo ri = (RouterInfo) 
entry; - getContext().netDbSegmentor().getSubNetDB(_facade._dbid).store(ri.getHash(), ri); + _facade.store(ri.getHash(), ri); } if (entry.isLeaseSet()) { LeaseSet ls = (LeaseSet) entry; - getContext().netDbSegmentor().getSubNetDB(_facade._dbid).store(ls.getHash(), ls); + _facade.store(ls.getHash(), ls); } } catch (UnsupportedCryptoException iae) { // don't blame the peer diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/SegmentedNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/networkdb/kademlia/SegmentedNetworkDatabaseFacade.java index e973c2c805..f80e68aaea 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/SegmentedNetworkDatabaseFacade.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/SegmentedNetworkDatabaseFacade.java @@ -18,6 +18,7 @@ import net.i2p.router.NetworkDatabaseFacade; import net.i2p.router.RouterContext; import net.i2p.router.networkdb.reseed.ReseedChecker; +import net.i2p.util.Log; /** * SegmentedNetworkDatabaseFacade @@ -59,22 +60,28 @@ public abstract class SegmentedNetworkDatabaseFacade { public SegmentedNetworkDatabaseFacade(RouterContext context) { // super(context, null); } - + /** - * Get a sub-netDb using a string identifier + * Determine whether to use subDb defenses at all or to use the extant FNDF/RAP/RAR defenses * + * @return true if using subDbs, false if not * @since 0.9.60 */ - protected abstract FloodfillNetworkDatabaseFacade getSubNetDB(String dbid); + public boolean useSubDbs() { + return false; + } + /** * Get a sub-netDb using a Hash identifier * + * @return client subDb for hash, or null if it does not exist * @since 0.9.60 */ protected abstract FloodfillNetworkDatabaseFacade getSubNetDB(Hash dbid); /** * Get the main netDb, the one which is used if we're a floodfill * + * @return may be null if main netDb is not initialized * @since 0.9.60 */ public abstract FloodfillNetworkDatabaseFacade mainNetDB(); @@ -82,79 +89,77 @@ public SegmentedNetworkDatabaseFacade(RouterContext 
context) { * Get the multihome netDb, the one which is used if we're a floodfill AND we * have a multihome address sent to us * + * @return may be null if the multihome netDb is not initialized * @since 0.9.60 */ public abstract FloodfillNetworkDatabaseFacade multiHomeNetDB(); - /** - * Get a client netDb for a given client string identifier. Will never - * return the mainNetDB. - * - * @since 0.9.60 - */ - public abstract FloodfillNetworkDatabaseFacade clientNetDB(String dbid); /** * Get a client netDb for a given client Hash identifier. Will never * return the mainNetDB. * + * @return may be null if the client netDb does not exist * @since 0.9.60 */ public abstract FloodfillNetworkDatabaseFacade clientNetDB(Hash dbid); /** - * Shut down the network database and all subDbs. + * Shut down the network databases * * @since 0.9.60 */ public abstract void shutdown(); /** - * Lookup the leaseSet for a given key in only client dbs. - * - * @since 0.9.60 - */ - public abstract LeaseSet lookupLeaseSetHashIsClient(Hash key); - /** - * Lookup the leaseSet for a given key locally across all dbs if dbid is - * null, or locally for the given dbid if it is not null. Use carefully, - * this function crosses db boundaries and is intended only for local use. + * Start up the network databases * * @since 0.9.60 */ - protected abstract LeaseSet lookupLeaseSetLocally(Hash key, String dbid); + public abstract void startup(); /** - * Lookup the dbid for a given hash. + * Lookup the leaseSet for a given key in only client dbs. * + * @return may be null * @since 0.9.60 */ - public abstract String getDbidByHash(Hash clientKey); + public abstract LeaseSet lookupLeaseSetHashIsClient(Hash key); /** * Get a set of all sub-netDbs. * + * @return all the sub netDbs including the main * @since 0.9.60 */ public abstract Set getSubNetDBs(); /** - * Get a set of all client dbid strings + * Make sure the SNDF is initialized. 
This is overridden in + * FloodfillNetworkDatabaseSegmentor so that it will be false until + * *all* required subDbs are initialized. * + * @return true if the netDbs are initialized * @since 0.9.60 */ - public abstract List getClients(); - /** - * Make sure the SNDF is initialized - */ public boolean isInitialized() { return mainNetDB().isInitialized(); } + /** - * Get a set of all routers + * list all of the RouterInfo objects known to all of the subDbs including + * the main subDb. * + * @return all of the RouterInfo objects known to all of the netDbs. non-null * @since 0.9.60 */ public Set getRouters() { - return mainNetDB().getRouters(); + Set rv = new HashSet<>(); + for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) { + rv.addAll(subdb.getRouters()); + } + return rv; } /** - * Get a set of all routers known to clients, which should always be zero. + * list of the RouterInfo objects for all known peers in all client + * subDbs which is mostly pointless because they should normally reject + * them anyway. * + * @return non-null all the routerInfos in all of the client netDbs *only* * @since 0.9.60 */ public Set getRoutersKnownToClients() { @@ -167,8 +172,11 @@ public Set getRoutersKnownToClients() { } /** - * Get a set of all leases known to all clients. + * Get a set of all leases known to all clients. These will be + * leaseSets for destinations that the clients communicate with + * and the leaseSet of the client itself. * + * @return non-null. all the leaseSets known to all of the client netDbs * @since 0.9.60 */ public Set getLeasesKnownToClients() { @@ -181,7 +189,8 @@ public Set getLeasesKnownToClients() { } /** * Check if the mainNetDB needs to reseed - * + * + * @return non-null. 
* @since 0.9.60 * */ public ReseedChecker reseedChecker() { @@ -190,14 +199,16 @@ public ReseedChecker reseedChecker() { /** * For console ConfigKeyringHelper * + * @return non-null * @since 0.9.60 */ - public List lookupClientBySigningPublicKey(SigningPublicKey spk) { + public List lookupClientBySigningPublicKey(SigningPublicKey spk) { return Collections.emptyList(); } /** * For console ConfigKeyringHelper * + * @return non-null * @since 0.9.60 */ public List getLocalClientsBlindData() { diff --git a/router/java/src/net/i2p/router/startup/BootNetworkDbJob.java b/router/java/src/net/i2p/router/startup/BootNetworkDbJob.java index e512f9ea38..79133cf171 100644 --- a/router/java/src/net/i2p/router/startup/BootNetworkDbJob.java +++ b/router/java/src/net/i2p/router/startup/BootNetworkDbJob.java @@ -10,6 +10,7 @@ import net.i2p.router.JobImpl; import net.i2p.router.RouterContext; +import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseSegmentor; /** start up the network database */ class BootNetworkDbJob extends JobImpl { @@ -21,6 +22,6 @@ public BootNetworkDbJob(RouterContext ctx) { public String getName() { return "Boot Network Database"; } public void runJob() { - getContext().netDb().startup(); + getContext().netDbSegmentor().startup(); } } diff --git a/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java b/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java index 3edb6fbf30..d17773c55b 100644 --- a/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java +++ b/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java @@ -242,13 +242,13 @@ public void distribute(I2NPMessage msg, Hash target, TunnelId tunnel) { // Handling of client tunnel messages need explicit handling // in the context of the client subDb. 
if (_client != null) { - String dbid = _context.netDbSegmentor().getDbidByHash(_client); - if (dbid == null) { + //Hash dbid = _context.netDbSegmentor().getDbidByHash(_client); + /*if (dbid == null) { // This error shouldn't occur. All clients should have their own netDb. if (_log.shouldLog(Log.ERROR)) _log.error("Error, client (" + _clientNickname + ") dbid not found while processing messages in the IBMD."); return; - } + }*/ // For now, the only client message we know how to handle here is a DSM. // There aren't normally DSM messages here, but it should be safe to store // them in the client netDb. @@ -391,7 +391,7 @@ public void handleClove(DeliveryInstructions instructions, I2NPMessage data) { _log.info("Storing garlic LS down tunnel for: " + dsm.getKey() + " sent to: " + _clientNickname + " (" + (_client != null ? _client.toBase32() : ") router")); - if (_client.toBase32() != null) { + if (_client != null) { // We need to replicate some of the handling that was previously // performed when these types of messages were passed back to // the inNetMessagePool.