Commit

Merge branch '465-subdb-ri' into 'master'
NetDB: Enforce no RIs in subdbs (Gitlab #465)

Closes #465

See merge request i2p-hackers/i2p.i2p!143
zzz committed Nov 2, 2023
2 parents 291e630 + 4ed709a commit 9d10369
Showing 6 changed files with 112 additions and 105 deletions.
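
The hunks in this commit follow one pattern: a client sub-database (subDb) is never a RouterInfo store, so floodfill-only machinery is gated on isClientDb() rather than !isMainDb(), and RouterInfo lookups are routed to the router's main netDb. A minimal, self-contained sketch of the gating idea follows; SubDbGatingSketch and its Runnable stand-in for FloodfillMonitorJob are hypothetical illustrations, not classes from i2p.i2p.

// Hypothetical sketch: gate floodfill-only machinery on "is this a client subDb?"
public class SubDbGatingSketch {
    private final boolean isClientDb;   // stand-in for isClientDb()
    private Runnable ffMonitor;         // stand-in for FloodfillMonitorJob
    private boolean isFF;

    public SubDbGatingSketch(boolean isClientDb) {
        this.isClientDb = isClientDb;
        // Client subDbs never start the floodfill monitor -- they hold no RouterInfos.
        if (isClientDb)
            ffMonitor = null;
        else
            ffMonitor = () -> System.out.println("floodfill monitor scheduled");
    }

    public void startup(boolean floodfillConfigured) {
        if (ffMonitor != null)
            ffMonitor.run();
        // Only the main netDb can ever be a floodfill participant.
        isFF = !isClientDb && floodfillConfigured;
    }

    public boolean isFloodfill() { return isFF; }

    public static void main(String[] args) {
        SubDbGatingSketch mainDb = new SubDbGatingSketch(false);
        mainDb.startup(true);
        System.out.println("main netDb floodfill?   " + mainDb.isFloodfill());   // true

        SubDbGatingSketch clientDb = new SubDbGatingSketch(true);
        clientDb.startup(true);
        System.out.println("client subDb floodfill? " + clientDb.isFloodfill()); // false
    }
}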
@@ -95,7 +95,7 @@ public FloodfillNetworkDatabaseFacade(RouterContext context, Hash dbid) {
         // for ISJ
         _context.statManager().createRateStat("netDb.RILookupDirect", "Was an iterative RI lookup sent directly?", "NetworkDatabase", rate);
         // No need to start the FloodfillMonitorJob for client subDb.
-        if (!isMainDb())
+        if (isClientDb())
             _ffMonitor = null;
         else
             _ffMonitor = new FloodfillMonitorJob(_context, this);
@@ -107,7 +107,7 @@ public synchronized void startup() {
         super.startup();
         if (_ffMonitor != null)
             _context.jobQueue().addJob(_ffMonitor);
-        if (!isMainDb()) {
+        if (isClientDb()) {
             isFF = false;
         } else {
             isFF = _context.getBooleanProperty(FloodfillMonitorJob.PROP_FLOODFILL_PARTICIPANT);
@@ -116,7 +116,7 @@ public synchronized void startup() {
         }
 
         long down = _context.router().getEstimatedDowntime();
-        if (!_context.commSystem().isDummy() && isMainDb() &&
+        if (!_context.commSystem().isDummy() && !isClientDb() &&
             (down == 0 || (!isFF && down > 30*60*1000) || (isFF && down > 24*60*60*1000))) {
             // refresh old routers
             Job rrj = new RefreshRoutersJob(_context, this);
@@ -128,7 +128,7 @@ public synchronized void startup() {
     @Override
     protected void createHandlers() {
         // Only initialize the handlers for the flooodfill netDb.
-        if (isMainDb()) {
+        if (!isClientDb()) {
             if (_log.shouldInfo())
                 _log.info("[dbid: " + super._dbid + "] Initializing the message handlers");
             _context.inNetMessagePool().registerHandlerJobBuilder(DatabaseLookupMessage.MESSAGE_TYPE, new FloodfillDatabaseLookupMessageHandler(_context, this));
@@ -435,13 +435,6 @@ public void runJob() {
         }
     }
 
-    @Override
-    protected PeerSelector createPeerSelector() {
-        if (_peerSelector != null)
-            return _peerSelector;
-        return new FloodfillPeerSelector(_context);
-    }
-
     /**
      * Public, called from console. This wakes up the floodfill monitor,
      * which will rebuild the RI and log in the event log,
@@ -148,11 +148,11 @@ public void runJob() {
         }
 
         // garlic encrypt to hide contents from the OBEP
-        RouterInfo peer = _facade.lookupRouterInfoLocally(_target);
+        RouterInfo peer = ctx.netDb().lookupRouterInfoLocally(_target);
         if (peer == null) {
             if (_log.shouldLog(Log.WARN))
                 _log.warn("(JobId: " + getJobId()
-                          + "; dbid: " + _facade._dbid
+                          + "; db: " + _facade
                           + ") Fail looking up RI locally for target " + _target);
             _facade.verifyFinished(_key);
             return;
@@ -245,7 +245,7 @@ public void runJob() {
         }
 
         if (_log.shouldLog(Log.INFO))
-            _log.info("[JobId: " + getJobId() + "; dbid: " + _facade._dbid
+            _log.info("[JobId: " + getJobId() + "; db: " + _facade
                       + "]: Starting verify (stored " + _key + " to " + _sentTo + "), asking " + _target);
         _sendTime = ctx.clock().now();
         _expiration = _sendTime + VERIFY_TIMEOUT;
@@ -277,7 +277,7 @@ private Hash pickTarget() {
             if (peers.isEmpty())
                 break;
             Hash peer = peers.get(0);
-            RouterInfo ri = _facade.lookupRouterInfoLocally(peer);
+            RouterInfo ri = getContext().netDb().lookupRouterInfoLocally(peer);
             //if (ri != null && StoreJob.supportsCert(ri, keyCert)) {
             if (ri != null && StoreJob.shouldStoreTo(ri) &&
                 //(!_isLS2 || (StoreJob.shouldStoreLS2To(ri) &&
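
Both RouterInfo lookups above now go through the router's main netDb (ctx.netDb() / getContext().netDb()) instead of the job's facade, which may be a client subDb that holds no RIs. A minimal sketch of that delegation under simplified assumptions; RiLookupSketch and the plain maps standing in for the databases are hypothetical, not the real facades.

// Hypothetical sketch: RouterInfo lookups are resolved against the main netDb,
// because a client subDb no longer stores RouterInfos at all.
import java.util.HashMap;
import java.util.Map;

public class RiLookupSketch {
    static class RouterInfo {}

    // Stand-in for the router's main (floodfill-capable) netDb: holds all RIs.
    static final Map<String, RouterInfo> MAIN_NETDB = new HashMap<>();
    // Stand-in for a client subDb: intentionally holds no RIs.
    static final Map<String, RouterInfo> CLIENT_SUBDB = new HashMap<>();

    // Old pattern: ask whichever facade the job was constructed with.
    static RouterInfo lookupViaFacade(String peer) {
        return CLIENT_SUBDB.get(peer);   // null when the facade is a client subDb
    }

    // New pattern: always resolve RouterInfos against the main netDb.
    static RouterInfo lookupViaMainNetDb(String peer) {
        return MAIN_NETDB.get(peer);
    }

    public static void main(String[] args) {
        MAIN_NETDB.put("peerA", new RouterInfo());
        System.out.println("via client facade: " + (lookupViaFacade("peerA") != null));    // false
        System.out.println("via main netDb:    " + (lookupViaMainNetDb("peerA") != null)); // true
    }
}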
@@ -334,18 +334,7 @@ public boolean isMatch(I2NPMessage message) {
                 return _key.equals(dsm.getKey());
             } else if (type == DatabaseSearchReplyMessage.MESSAGE_TYPE) {
                 DatabaseSearchReplyMessage dsrm = (DatabaseSearchReplyMessage)message;
-                boolean rv = _key.equals(dsrm.getSearchKey());
-                if (rv) {
-                    if (_log.shouldInfo())
-                        _log.info("[JobId: " + getJobId() + "; dbid: " + _facade._dbid
-                                  + "DSRM key match successful.");
-                } else {
-                    if (_log.shouldWarn())
-                        _log.warn("[JobId: " + getJobId() + "; dbid: " + _facade._dbid
-                                  + "]: DSRM key mismatch for key " + _key
-                                  + " with DSRM: " + message);
-                }
-                return rv;
+                return _key.equals(dsrm.getSearchKey());
             }
             return false;
         }
@@ -413,21 +402,16 @@ public void runJob() {
             // assume 0 old, all new, 0 invalid, 0 dup
             pm.dbLookupReply(_target, 0,
                              dsrm.getNumReplies(), 0, 0, delay);
-            // ToDo: Clarify the following log message.
-            // This message is phrased in a manner that draws attention, and indicates
-            // the possibility of a problem that may need follow-up. But examination
-            // of the code did not provide insight as to what is being verified,
-            // and what is failing. This message will be displayed unconditionally
-            // every time a DSRM is handled here.
+            // The peer we asked did not have the key, so _sentTo failed to flood it
             if (_log.shouldLog(Log.WARN))
-                _log.warn(getJobId() + ": DSRM verify failed (dbid: "
-                          + _facade._dbid + ") for " + _key);
+                _log.warn(getJobId() + ": DSRM verify failed (db: "
+                          + _facade + ") for " + _key);
             // only for RI... LS too dangerous?
             if (_isRouterInfo) {
                 if (_facade.isClientDb())
                     if (_log.shouldLog(Log.WARN))
-                        _log.warn("[Jobid: " + getJobId() + "; dbid: " + _facade._dbid
-                                  + "Warning! Client is starting a SingleLookupJob (DIRECT?) for RouterInfo");
+                        _log.warn("[Jobid: " + getJobId() + "; db: " + _facade
+                                  + "] Warning! Client is starting a SingleLookupJob (DIRECT?) for RouterInfo");
                 ctx.jobQueue().addJob(new SingleLookupJob(ctx, dsrm));
             }
         }
@@ -458,12 +442,7 @@ public void runJob() {
      * So at least we'll try THREE ffs round-robin if things continue to fail...
      */
     private void resend() {
-        // It's safe to check the default netDb first, but if the lookup is for
-        // a client, nearly all RI is expected to be found in the FF netDb.
         DatabaseEntry ds = _facade.lookupLocally(_key);
-        if ((ds == null) && _facade.isClientDb() && _isRouterInfo)
-            // It's safe to check the floodfill netDb for RI
-            ds = getContext().netDb().lookupLocally(_key);
         if (ds != null) {
             // By the time we get here, a minute or more after the store started,
             // we may have already started a new store