Skip to content

Commit 628d9d9

Browse files
committed
new attacks implemented
1 parent a4e3fdb commit 628d9d9

14 files changed

+357
-53
lines changed

nebula/addons/attacks/attacks.py

+2
Original file line numberDiff line numberDiff line change
@@ -111,6 +111,7 @@ def create_attack(engine) -> Attack:
111111
AttackException: If the specified attack name is not found in the `ATTACK_MAP`.
112112
"""
113113
from nebula.addons.attacks.communications.delayerattack import DelayerAttack
114+
from nebula.addons.attacks.communications.floodingattack import FloodingAttack
114115
from nebula.addons.attacks.dataset.datapoison import SamplePoisoningAttack
115116
from nebula.addons.attacks.dataset.labelflipping import LabelFlippingAttack
116117
from nebula.addons.attacks.model.gllneuroninversion import GLLNeuronInversionAttack
@@ -123,6 +124,7 @@ def create_attack(engine) -> Attack:
123124
"Noise Injection": NoiseInjectionAttack,
124125
"Swapping Weights": SwappingWeightsAttack,
125126
"Delayer": DelayerAttack,
127+
"Flooding": FloodingAttack,
126128
"Label Flipping": LabelFlippingAttack,
127129
"Sample Poisoning": SamplePoisoningAttack,
128130
"Model Poisoning": ModelPoisonAttack,
Original file line numberDiff line numberDiff line change
@@ -1,20 +1,35 @@
11
import logging
22
import types
33
from abc import abstractmethod
4+
import random
45

56
from nebula.addons.attacks.attacks import Attack
67

78

89
class CommunicationAttack(Attack):
9-
def __init__(self, engine, target_class, target_method, round_start_attack, round_stop_attack, decorator_args=None):
10+
def __init__(self, engine,
11+
target_class,
12+
target_method,
13+
round_start_attack,
14+
round_stop_attack,
15+
attack_interval,
16+
decorator_args=None,
17+
selectivity_percentage: int = 100,
18+
selection_interval: int = None
19+
):
1020
super().__init__()
1121
self.engine = engine
1222
self.target_class = target_class
1323
self.target_method = target_method
1424
self.decorator_args = decorator_args
1525
self.round_start_attack = round_start_attack
1626
self.round_stop_attack = round_stop_attack
27+
self.attack_interval = attack_interval
1728
self.original_method = getattr(target_class, target_method, None)
29+
self.selectivity_percentage = selectivity_percentage
30+
self.selection_interval = selection_interval
31+
self.last_selection_round = 0
32+
self.targets = set()
1833

1934
if not self.original_method:
2035
raise AttributeError(f"Method {target_method} not found in class {target_class}")
@@ -24,10 +39,28 @@ def decorator(self, *args):
2439
"""Decorator that adds malicious behavior to the execution of the original method."""
2540
pass
2641

42+
async def select_targets(self):
43+
if self.selectivity_percentage != 100:
44+
if self.selection_interval:
45+
if self.last_selection_round % self.selection_interval == 0:
46+
logging.info("Recalculating targets...")
47+
all_nodes = await self.engine.cm.get_addrs_current_connections(only_direct=True)
48+
num_targets = max(1, int(len(all_nodes) * (self.selectivity_percentage / 100)))
49+
self.targets = set(random.sample(list(all_nodes), num_targets))
50+
elif not self.targets:
51+
logging.info("Calculating targets...")
52+
all_nodes = await self.engine.cm.get_addrs_current_connections(only_direct=True)
53+
num_targets = max(1, int(len(all_nodes) * (self.selectivity_percentage / 100)))
54+
self.targets = set(random.sample(list(all_nodes), num_targets))
55+
else:
56+
logging.info("All neighbors selected as targets")
57+
self.targets = await self.engine.cm.get_addrs_current_connections(only_direct=True)
58+
59+
logging.info(f"Selected {self.selectivity_percentage}% targets from neighbors: {self.targets}")
60+
self.last_selection_round+=1
61+
2762
async def _inject_malicious_behaviour(self):
2863
"""Inject malicious behavior into the target method."""
29-
logging.info("Injecting malicious behavior")
30-
3164
decorated_method = self.decorator(self.decorator_args)(self.original_method)
3265

3366
setattr(
@@ -38,14 +71,18 @@ async def _inject_malicious_behaviour(self):
3871

3972
async def _restore_original_behaviour(self):
4073
"""Restore the original behavior of the target method."""
41-
logging.info(f"Restoring original behavior of {self.target_class}.{self.target_method}")
4274
setattr(self.target_class, self.target_method, self.original_method)
4375

4476
async def attack(self):
4577
"""Perform the attack logic based on the current round."""
46-
if self.engine.round == self.round_stop_attack:
47-
logging.info(f"[{self.__class__.__name__}] Restoring original behavior")
78+
if self.engine.round not in range(self.round_start_attack, self.round_stop_attack + 1):
79+
pass
80+
elif self.engine.round == self.round_stop_attack:
81+
logging.info(f"[{self.__class__.__name__}] Stoping attack")
4882
await self._restore_original_behaviour()
49-
elif self.engine.round == self.round_start_attack:
50-
logging.info(f"[{self.__class__.__name__}] Injecting malicious behavior")
83+
elif (self.engine.round == self.round_start_attack) or ((self.engine.round - self.round_start_attack) % self.attack_interval == 0):
84+
await self.select_targets()
85+
logging.info(f"[{self.__class__.__name__}] Performing attack")
5186
await self._inject_malicious_behaviour()
87+
else:
88+
await self._restore_original_behaviour()

nebula/addons/attacks/communications/delayerattack.py

+13-4
Original file line numberDiff line numberDiff line change
@@ -22,18 +22,24 @@ def __init__(self, engine, attack_params: dict):
2222
self.delay = int(attack_params["delay"])
2323
round_start = int(attack_params["round_start_attack"])
2424
round_stop = int(attack_params["round_stop_attack"])
25+
attack_interval = int(attack_params["attack_interval"])
26+
self.target_percentage = int(attack_params["target_percentage"])
27+
self.selection_interval = int(attack_params["selection_interval"])
2528
except KeyError as e:
2629
raise ValueError(f"Missing required attack parameter: {e}")
2730
except ValueError:
2831
raise ValueError("Invalid value in attack_params. Ensure all values are integers.")
2932

3033
super().__init__(
3134
engine,
32-
engine._cm._propagator,
33-
"propagate",
35+
engine._cm,
36+
"send_model",
3437
round_start,
3538
round_stop,
39+
attack_interval,
3640
self.delay,
41+
self.target_percentage,
42+
self.selection_interval,
3743
)
3844

3945
def decorator(self, delay: int):
@@ -50,8 +56,11 @@ def decorator(self, delay: int):
5056
def decorator(func):
5157
@wraps(func)
5258
async def wrapper(*args, **kwargs):
53-
logging.info(f"[DelayerAttack] Adding delay of {delay} seconds to {func.__name__}")
54-
await asyncio.sleep(delay)
59+
if len(args) > 1:
60+
dest_addr = args[1]
61+
if dest_addr in self.targets:
62+
logging.info(f"[DelayerAttack] Delaying model propagation to {dest_addr} by {delay} seconds")
63+
await asyncio.sleep(delay)
5564
_, *new_args = args # Exclude self argument
5665
return await func(*new_args)
5766

Original file line numberDiff line numberDiff line change
@@ -0,0 +1,74 @@
1+
import logging
2+
from functools import wraps
3+
4+
from nebula.addons.attacks.communications.communicationattack import CommunicationAttack
5+
6+
7+
class FloodingAttack(CommunicationAttack):
8+
"""
9+
Implements an attack that floods selected target nodes by repeatedly sending each message a configurable number of times.
10+
"""
11+
12+
def __init__(self, engine, attack_params: dict):
13+
"""
14+
Initializes the FloodingAttack with the engine and attack parameters.
15+
16+
Args:
17+
engine: The engine managing the attack context.
18+
attack_params (dict): Parameters for the attack, including the flooding factor.
19+
"""
20+
try:
21+
round_start = int(attack_params["round_start_attack"])
22+
round_stop = int(attack_params["round_stop_attack"])
23+
attack_interval = int(attack_params["attack_interval"])
24+
self.flooding_factor = int(attack_params["flooding_factor"])
25+
self.target_percentage = int(attack_params["target_percentage"])
26+
self.selection_interval = int(attack_params["selection_interval"])
27+
except KeyError as e:
28+
raise ValueError(f"Missing required attack parameter: {e}")
29+
except ValueError:
30+
raise ValueError("Invalid value in attack_params. Ensure all values are integers.")
31+
32+
self.verbose = False
33+
34+
super().__init__(
35+
engine,
36+
engine._cm,
37+
"send_message",
38+
round_start,
39+
round_stop,
40+
attack_interval,
41+
self.flooding_factor,
42+
self.target_percentage,
43+
self.selection_interval,
44+
)
45+
46+
def decorator(self, flooding_factor: int):
47+
"""
48+
Decorator that repeats the execution of the original method a specified number of times.
49+
50+
Args:
51+
flooding_factor (int): The number of times to repeat the function execution.
52+
53+
Returns:
54+
function: A decorator function that wraps the target method with the flooding logic.
55+
"""
56+
57+
def decorator(func):
58+
@wraps(func)
59+
async def wrapper(*args, **kwargs):
60+
if len(args) > 1:
61+
dest_addr = args[1]
62+
if dest_addr in self.targets:
63+
logging.info(f"[FloodingAttack] Flooding message to {dest_addr} by {flooding_factor} times")
64+
for i in range(flooding_factor):
65+
if self.verbose:
66+
logging.info(f"[FloodingAttack] Sending duplicate {i+1}/{flooding_factor} to {dest_addr}")
67+
_, *new_args = args # Exclude self argument
68+
await func(*new_args, **kwargs)
69+
_, *new_args = args # Exclude self argument
70+
return await func(*new_args)
71+
72+
return wrapper
73+
74+
return decorator

nebula/addons/attacks/dataset/datapoison.py

+10-3
Original file line numberDiff line numberDiff line change
@@ -46,15 +46,22 @@ def __init__(self, engine, attack_params):
4646
engine (object): The training engine object.
4747
attack_params (dict): Dictionary of attack parameters.
4848
"""
49-
super().__init__(engine)
49+
try:
50+
round_start = int(attack_params["round_start_attack"])
51+
round_stop = int(attack_params["round_stop_attack"])
52+
attack_interval = int(attack_params["attack_interval"])
53+
except KeyError as e:
54+
raise ValueError(f"Missing required attack parameter: {e}")
55+
except ValueError:
56+
raise ValueError("Invalid value in attack_params. Ensure all values are integers.")
57+
58+
super().__init__(engine, round_start, round_stop, attack_interval)
5059
self.datamodule = engine._trainer.datamodule
5160
self.poisoned_percent = float(attack_params["poisoned_percent"])
5261
self.poisoned_ratio = float(attack_params["poisoned_ratio"])
5362
self.targeted = attack_params["targeted"]
5463
self.target_label = int(attack_params["target_label"])
5564
self.noise_type = attack_params["noise_type"]
56-
self.round_start_attack = int(attack_params["round_start_attack"])
57-
self.round_stop_attack = int(attack_params["round_stop_attack"])
5865

5966
def apply_noise(self, t, noise_type, poisoned_ratio):
6067
"""

nebula/addons/attacks/dataset/datasetattack.py

+10-7
Original file line numberDiff line numberDiff line change
@@ -13,16 +13,17 @@ class DatasetAttack(Attack):
1313
data, potentially impacting the model's training process.
1414
"""
1515

16-
def __init__(self, engine):
16+
def __init__(self, engine, round_start_attack, round_stop_attack, attack_interval):
1717
"""
1818
Initializes the DatasetAttack with the given engine.
1919
2020
Args:
2121
engine: The engine managing the attack context.
2222
"""
2323
self.engine = engine
24-
self.round_start_attack = 0
25-
self.round_stop_attack = 10
24+
self.round_start_attack = round_start_attack
25+
self.round_stop_attack = round_stop_attack
26+
self.attack_interval = attack_interval
2627

2728
async def attack(self):
2829
"""
@@ -32,11 +33,13 @@ async def attack(self):
3233
with a malicious dataset. The attack is stopped when the engine reaches the
3334
designated stop round.
3435
"""
35-
if self.engine.round in range(self.round_start_attack, self.round_stop_attack):
36-
logging.info("[DatasetAttack] Performing attack")
36+
if self.engine.round not in range(self.round_start_attack, self.round_stop_attack + 1):
37+
pass
38+
elif self.engine.round == self.round_stop_attack:
39+
logging.info(f"[{self.__class__.__name__}] Stopping attack")
40+
elif self.engine.round >= self.round_start_attack and ((self.engine.round - self.round_start_attack) % self.attack_interval == 0):
41+
logging.info(f"[{self.__class__.__name__}] Performing attack")
3742
self.engine.trainer.datamodule.train_set = self.get_malicious_dataset()
38-
elif self.engine.round == self.round_stop_attack + 1:
39-
logging.info("[DatasetAttack] Stopping attack")
4043

4144
async def _inject_malicious_behaviour(self, target_function, *args, **kwargs):
4245
"""

nebula/addons/attacks/dataset/labelflipping.py

+10-3
Original file line numberDiff line numberDiff line change
@@ -32,14 +32,21 @@ def __init__(self, engine, attack_params):
3232
attack_params (dict): Parameters for the attack, including the percentage of
3333
poisoned data, targeting options, and label specifications.
3434
"""
35-
super().__init__(engine)
35+
try:
36+
round_start = int(attack_params["round_start_attack"])
37+
round_stop = int(attack_params["round_stop_attack"])
38+
attack_interval = int(attack_params["attack_interval"])
39+
except KeyError as e:
40+
raise ValueError(f"Missing required attack parameter: {e}")
41+
except ValueError:
42+
raise ValueError("Invalid value in attack_params. Ensure all values are integers.")
43+
44+
super().__init__(engine, round_start, round_stop, attack_interval)
3645
self.datamodule = engine._trainer.datamodule
3746
self.poisoned_percent = float(attack_params["poisoned_percent"])
3847
self.targeted = attack_params["targeted"]
3948
self.target_label = int(attack_params["target_label"])
4049
self.target_changed_label = int(attack_params["target_changed_label"])
41-
self.round_start_attack = int(attack_params["round_start_attack"])
42-
self.round_stop_attack = int(attack_params["round_stop_attack"])
4350

4451
def labelFlipping(
4552
self,

nebula/addons/attacks/model/gllneuroninversion.py

+10-3
Original file line numberDiff line numberDiff line change
@@ -26,9 +26,16 @@ def __init__(self, engine, attack_params):
2626
engine (object): The training engine object.
2727
_ (any): A placeholder argument (not used in this class).
2828
"""
29-
super().__init__(engine)
30-
self.round_start_attack = int(attack_params["round_start_attack"])
31-
self.round_stop_attack = int(attack_params["round_stop_attack"])
29+
try:
30+
round_start = int(attack_params["round_start_attack"])
31+
round_stop = int(attack_params["round_stop_attack"])
32+
attack_interval = int(attack_params["attack_interval"])
33+
except KeyError as e:
34+
raise ValueError(f"Missing required attack parameter: {e}")
35+
except ValueError:
36+
raise ValueError("Invalid value in attack_params. Ensure all values are integers.")
37+
38+
super().__init__(engine, round_start, round_stop, attack_interval)
3239

3340
def model_attack(self, received_weights):
3441
"""

nebula/addons/attacks/model/modelattack.py

+12-9
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ class ModelAttack(Attack):
2020
model aggregation.
2121
"""
2222

23-
def __init__(self, engine):
23+
def __init__(self, engine, round_start_attack, round_stop_attack, attack_interval):
2424
"""
2525
Initializes the ModelAttack with the specified engine.
2626
@@ -31,8 +31,9 @@ def __init__(self, engine):
3131
self.engine = engine
3232
self.aggregator = engine._aggregator
3333
self.original_aggregation = engine.aggregator.run_aggregation
34-
self.round_start_attack = 0
35-
self.round_stop_attack = 10
34+
self.round_start_attack = round_start_attack
35+
self.round_stop_attack = round_stop_attack
36+
self.attack_interval = attack_interval
3637

3738
def aggregator_decorator(self):
3839
"""
@@ -104,11 +105,13 @@ async def attack(self):
104105
105106
This method logs the attack and calls the method to modify the aggregator.
106107
"""
107-
if self.engine.round == self.round_start_attack:
108-
logging.info("[ModelAttack] Injecting malicious behaviour")
108+
if self.engine.round not in range(self.round_start_attack, self.round_stop_attack + 1):
109+
pass
110+
elif self.engine.round == self.round_stop_attack:
111+
logging.info(f"[{self.__class__.__name__}] Stopping attack")
112+
await self._restore_original_behaviour()
113+
elif (self.engine.round == self.round_start_attack) or ((self.engine.round - self.round_start_attack) % self.attack_interval == 0):
114+
logging.info(f"[{self.__class__.__name__}] Performing attack")
109115
await self._inject_malicious_behaviour()
110-
elif self.engine.round == self.round_stop_attack + 1:
111-
logging.info("[ModelAttack] Stopping attack")
116+
else:
112117
await self._restore_original_behaviour()
113-
elif self.engine.round in range(self.round_start_attack, self.round_stop_attack):
114-
logging.info("[ModelAttack] Performing attack")

nebula/addons/attacks/model/modelpoison.py

+11-3
Original file line numberDiff line numberDiff line change
@@ -41,11 +41,19 @@ def __init__(self, engine, attack_params):
4141
engine (object): The training engine object.
4242
attack_params (dict): Dictionary of attack parameters.
4343
"""
44-
super().__init__(engine)
44+
try:
45+
round_start = int(attack_params["round_start_attack"])
46+
round_stop = int(attack_params["round_stop_attack"])
47+
attack_interval = int(attack_params["attack_interval"])
48+
except KeyError as e:
49+
raise ValueError(f"Missing required attack parameter: {e}")
50+
except ValueError:
51+
raise ValueError("Invalid value in attack_params. Ensure all values are integers.")
52+
53+
super().__init__(engine, round_start, round_stop, attack_interval)
54+
4555
self.poisoned_ratio = float(attack_params["poisoned_ratio"])
4656
self.noise_type = attack_params["noise_type"].lower()
47-
self.round_start_attack = int(attack_params["round_start_attack"])
48-
self.round_stop_attack = int(attack_params["round_stop_attack"])
4957

5058
def modelPoison(self, model: OrderedDict, poisoned_ratio, noise_type="gaussian"):
5159
"""

0 commit comments

Comments
 (0)