Skip to content

Commit

Permalink
Router: Remove all refs to ban-by-lookup-flood code. Don't create thr…
Browse files Browse the repository at this point in the history
…ottlers in the FNDF unless it's the main db
  • Loading branch information
eyedeekay committed Oct 16, 2023
1 parent 195b4b9 commit a2446e1
Show file tree
Hide file tree
Showing 3 changed files with 11 additions and 57 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -83,38 +83,6 @@ public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash from
_context.statManager().addRateData("netDb.nonFFLookupsDropped", 1);
return null;
}

// Implementation of the banning of routers based on excessive burst DLM
// is pending a reliable way to discriminate between DLM that are sent
// and replied directly, and DLM that are forwarded by a router OBEP.
/*****
if (_facade.shouldBanLookup(dlm.getFrom(), dlm.getReplyTunnel())) {
if (_log.shouldLog(Log.WARN)) {
_log.warn("[dbid: " + _facade._dbid
+ "] Possibly throttling " + dlm.getSearchType()
+ " lookup request for " + dlm.getSearchKey()
+ " because requests are being sent extremely fast, reply was to: "
+ dlm.getFrom() + " tunnel: " + dlm.getReplyTunnel());
_context.statManager().addRateData("netDb.repeatedLookupsDropped", 1);
}
}
if (_facade.shouldBanBurstLookup(dlm.getFrom(), dlm.getReplyTunnel())) {
if (_log.shouldLog(Log.WARN)) {
_log.warn("[dbid: " + _facade._dbid
+ "] Banning " + dlm.getSearchType()
+ " lookup request for " + dlm.getSearchKey()
+ " because requests are being sent extremely fast in a very short time, reply was to: "
+ dlm.getFrom() + " tunnel: " + dlm.getReplyTunnel());
_context.statManager().addRateData("netDb.repeatedBurstLookupsDropped", 1);
}
_context.banlist().banlistRouter(dlm.getFrom(), " <b>➜</b> Excessive lookup requests, burst", null,
_context.banlist().BANLIST_CODE_HARD, null,
_context.clock().now() + 4*60*60*1000);
_context.commSystem().mayDisconnect(dlm.getFrom());
_context.statManager().addRateData("netDb.lookupsDropped", 1);
return null;
}
*****/
if ((!_facade.shouldThrottleLookup(dlm.getFrom(), dlm.getReplyTunnel())
&& !_facade.shouldThrottleBurstLookup(dlm.getFrom(), dlm.getReplyTunnel()))
|| _context.routerHash().equals(dlm.getFrom())) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,13 +42,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
private FloodThrottler _floodThrottler;
private LookupThrottler _lookupThrottler;
private LookupThrottler _lookupThrottlerBurst;
private LookupThrottler _lookupBanner;
private LookupThrottler _lookupBannerBurst;
private final Job _ffMonitor;
private final int BAN_LOOKUP_BASE = 75;
private final int BAN_LOOKUP_BASE_INTERVAL = 5*60*1000;
private final int BAN_LOOKUP_BURST = 10;
private final int BAN_LOOKUP_BURST_INTERVAL = 15*1000;
private final int DROP_LOOKUP_BURST = 10;
private final int DROP_LOOKUP_BURST_INTERVAL = 30*1000;

Expand Down Expand Up @@ -103,15 +97,15 @@ public synchronized void startup() {
super.startup();
if (_ffMonitor != null)
_context.jobQueue().addJob(_ffMonitor);
if (!super.isMainDb())
if (!super.isMainDb()){
isFF = false;
else
_lookupThrottler = null;
_lookupThrottlerBurst = null;
} else {
isFF = _context.getBooleanProperty(FloodfillMonitorJob.PROP_FLOODFILL_PARTICIPANT);

_lookupThrottler = new LookupThrottler();
_lookupBanner = new LookupThrottler(BAN_LOOKUP_BASE, BAN_LOOKUP_BASE_INTERVAL);
_lookupThrottlerBurst = new LookupThrottler(DROP_LOOKUP_BURST, DROP_LOOKUP_BURST_INTERVAL);
_lookupBannerBurst = new LookupThrottler(BAN_LOOKUP_BURST, BAN_LOOKUP_BURST_INTERVAL);
_lookupThrottler = new LookupThrottler();
_lookupThrottlerBurst = new LookupThrottler(DROP_LOOKUP_BURST, DROP_LOOKUP_BURST_INTERVAL);
}

long down = _context.router().getEstimatedDowntime();
if (!_context.commSystem().isDummy() &&
Expand Down Expand Up @@ -262,21 +256,11 @@ boolean shouldThrottleLookup(Hash from, TunnelId id) {
return _lookupThrottler == null || _lookupThrottler.shouldThrottle(from, id);
}

/**
 * Whether lookups from this peer/tunnel have exceeded the ban threshold.
 *
 * @param from the router hash the reply would be sent to
 * @param id the reply tunnel, or null for a direct reply
 * @return true if the request should be treated as over-limit
 */
boolean shouldBanLookup(Hash from, TunnelId id) {
    // _lookupBanner is null before startup; treat that window as over-limit
    final LookupThrottler banner = _lookupBanner;
    if (banner == null)
        return true;
    return banner.shouldThrottle(from, id);
}

/**
 * Whether a burst of lookups from this peer/tunnel should be throttled.
 *
 * @param from the router hash the reply would be sent to
 * @param id the reply tunnel, or null for a direct reply
 * @return true if the request should be throttled (including before startup)
 */
boolean shouldThrottleBurstLookup(Hash from, TunnelId id) {
    // Fix: null-check the burst throttler that is actually dereferenced below.
    // The original checked _lookupThrottler but called _lookupThrottlerBurst,
    // which would NPE if only the burst throttler were null (both are null
    // before startup and in non-main-db facades, so check the right one).
    return _lookupThrottlerBurst == null || _lookupThrottlerBurst.shouldThrottle(from, id);
}

/**
 * Whether a burst of lookups from this peer/tunnel has exceeded the ban threshold.
 *
 * @param from the router hash the reply would be sent to
 * @param id the reply tunnel, or null for a direct reply
 * @return true if the request is over the burst ban limit (including before startup)
 */
boolean shouldBanBurstLookup(Hash from, TunnelId id) {
    // Fix: null-check the burst banner that is actually dereferenced below.
    // The original checked _lookupBanner but called _lookupBannerBurst,
    // which would NPE if only the burst banner were null.
    return _lookupBannerBurst == null || _lookupBannerBurst.shouldThrottle(from, id);
}

/**
* If we are floodfill AND the key is not throttled,
* flood it, otherwise don't.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -148,11 +148,13 @@ else if (getContext().clientManager().isLocal(key))
blockStore = false;
if (blockStore) {
getContext().statManager().addRateData("netDb.storeLocalLeaseSetAttempt", 1, 0);
// throw rather than return, so that we send the ack below (prevent easy attack)
// If we're using subdbs, store the leaseSet in the multihome DB.
// otherwise, throw rather than return, so that we send the ack below (prevent easy attack)
dontBlamePeer = true;
if (getContext().netDbSegmentor().useSubDbs())
getContext().multihomeNetDb().store(key, ls);
throw new IllegalArgumentException("(dbid: " + _facade._dbid
else
throw new IllegalArgumentException("(dbid: " + _facade._dbid
+ ") Peer attempted to store local leaseSet: "
+ key.toBase32());
}
Expand Down

0 comments on commit a2446e1

Please sign in to comment.