
Commit 4736c7e

Refactor: Rename variable poisoned_persent to poisoned_percent
- Fixed typo in the variable name for improved clarity.
- Added descriptions of the attacks in the code for better understanding of functionality.
1 parent 70de243 commit 4736c7e

7 files changed (+53 / -10 lines)
New file (+11)

@@ -0,0 +1,11 @@
+"""
+Utilities for data poisoning techniques in machine learning. This package includes three main components:
+
+1. **Data Poisoning (`datapoison.py`)**: Functions to apply various types of noise to training datasets, either randomly or targeted, altering the data to test the model's resilience against corrupted inputs.
+
+2. **Model Poisoning (`modelpoison.py`)**: A utility for injecting noise directly into a model's parameters, simulating attacks on model integrity by modifying the underlying weights and biases.
+
+3. **Label Flipping (`labelflipping.py`)**: Functions to randomly change labels within a dataset or target specific labels for modification, effectively simulating label corruption to evaluate the impact on model performance.
+
+Together, these components provide a comprehensive toolkit for researching and implementing poisoning attacks in machine learning systems, aiding in the development of more robust models.
+"""

nebula/addons/attacks/poisoning/datapoison.py (+12)

@@ -1,3 +1,15 @@
+"""
+This module contains functions for applying data poisoning techniques,
+including the application of noise to tensors and modification of datasets
+to simulate poisoning attacks.
+
+Functions:
+- apply_noise: Applies noise to a tensor based on the specified noise type and poisoning ratio.
+- datapoison: Adds noise to a specified portion of a dataset for data poisoning purposes.
+- add_x_to_image: Adds an 'X' mark to the top-left corner of an image.
+- poison_to_nlp_rawdata: Poisons NLP data by setting word vectors to zero with a given probability.
+"""
+
 import copy
 import random
 
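The listed functions imply a two-step pattern: select a percentage of the samples, then perturb each selected tensor according to a noise type and ratio. The sketch below illustrates that pattern only; the function names, signatures, and the choice of Gaussian noise are assumptions for illustration, not the module's actual API.

# Minimal sketch of percentage-based data poisoning (hypothetical helpers, not datapoison.py's API).
import copy
import random

import torch


def apply_gaussian_noise(tensor: torch.Tensor, poisoned_ratio: float) -> torch.Tensor:
    """Blend zero-mean Gaussian noise into a tensor; a larger ratio means stronger corruption."""
    return tensor.float() + torch.randn_like(tensor.float()) * poisoned_ratio


def poison_samples(samples, poisoned_percent: float, poisoned_ratio: float, seed: int = 0):
    """Apply noise to a randomly chosen fraction of the samples (poisoned_percent in [0, 1] here)."""
    rng = random.Random(seed)
    poisoned = copy.deepcopy(samples)
    n_poison = int(len(poisoned) * poisoned_percent)
    for idx in rng.sample(range(len(poisoned)), n_poison):
        poisoned[idx] = apply_gaussian_noise(poisoned[idx], poisoned_ratio)
    return poisoned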
nebula/addons/attacks/poisoning/labelflipping.py (+9)

@@ -1,3 +1,12 @@
+"""
+This module provides a function for label flipping in datasets, allowing for the simulation of label noise
+as a form of data poisoning. The main function modifies the labels of specific samples in a dataset based
+on a specified percentage and target conditions.
+
+Function:
+- labelFlipping: Flips the labels of a specified portion of a dataset to random values or to a specific target label.
+"""
+
 import copy
 import random
 
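A minimal sketch of the behavior described above, assuming labels are plain integer class indices; the helper name and signature are illustrative and are not the actual labelFlipping interface.

# Hypothetical label-flipping helper (illustrative only).
import random


def flip_labels(labels, num_classes: int, poisoned_percent: float,
                targeted: bool = False, target_label: int = 0,
                target_changed_label: int = 0, seed: int = 0):
    """Flip a fraction of labels at random, or rewrite one class when targeted."""
    rng = random.Random(seed)
    flipped = list(labels)
    if targeted:
        # Targeted mode: every sample of target_label is relabeled as target_changed_label.
        flipped = [target_changed_label if y == target_label else y for y in flipped]
    else:
        # Untargeted mode: a poisoned_percent fraction of samples gets a random wrong label.
        n_flip = int(len(flipped) * poisoned_percent)
        for i in rng.sample(range(len(flipped)), n_flip):
            flipped[i] = rng.choice([c for c in range(num_classes) if c != flipped[i]])
    return flipped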
nebula/addons/attacks/poisoning/modelpoison.py (+11)

@@ -1,3 +1,14 @@
+"""
+This module provides a function for adding noise to a machine learning model's parameters, simulating
+model poisoning attacks. The main function allows for the injection of various types of noise into
+the model parameters, effectively altering them to test the model's robustness against malicious
+manipulations.
+
+Function:
+- modelpoison: Modifies the parameters of a model by injecting noise according to a specified ratio
+  and type of noise (e.g., Gaussian, salt, salt-and-pepper).
+"""
+
 from collections import OrderedDict
 
 import torch

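As a rough illustration of noise injection at the parameter level, the sketch below perturbs every floating-point tensor in a state_dict with Gaussian noise. The function name and the restriction to Gaussian noise are assumptions for illustration, not modelpoison's actual behavior.

# Hypothetical parameter-poisoning helper (illustrative only).
from collections import OrderedDict

import torch


def poison_state_dict(state_dict, poisoned_ratio: float):
    """Return a copy of the parameters with zero-mean Gaussian noise scaled by poisoned_ratio."""
    poisoned = OrderedDict()
    for name, param in state_dict.items():
        if param.is_floating_point():
            poisoned[name] = param + torch.randn_like(param) * poisoned_ratio
        else:
            poisoned[name] = param.clone()  # leave integer buffers (e.g. counters) untouched
    return poisoned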
nebula/core/datasets/changeablesubset.py (+4 / -4)

@@ -11,7 +11,7 @@ def __init__(
         indices,
         label_flipping=False,
         data_poisoning=False,
-        poisoned_persent=0,
+        poisoned_percent=0,
         poisoned_ratio=0,
         targeted=False,
         target_label=0,
@@ -24,7 +24,7 @@ def __init__(
         self.indices = indices
         self.label_flipping = label_flipping
         self.data_poisoning = data_poisoning
-        self.poisoned_persent = poisoned_persent
+        self.poisoned_percent = poisoned_percent
         self.poisoned_ratio = poisoned_ratio
         self.targeted = targeted
         self.target_label = target_label
@@ -35,7 +35,7 @@ def __init__(
             self.dataset = labelFlipping(
                 self.dataset,
                 self.indices,
-                self.poisoned_persent,
+                self.poisoned_percent,
                 self.targeted,
                 self.target_label,
                 self.target_changed_label,
@@ -44,7 +44,7 @@ def __init__(
             self.dataset = datapoison(
                 self.dataset,
                 self.indices,
-                self.poisoned_persent,
+                self.poisoned_percent,
                 self.poisoned_ratio,
                 self.targeted,
                 self.target_label,
nebula/core/datasets/datamodule.py (+3 / -3)

@@ -25,7 +25,7 @@ def __init__(
         val_percent=0.1,
         label_flipping=False,
         data_poisoning=False,
-        poisoned_persent=0,
+        poisoned_percent=0,
         poisoned_ratio=0,
         targeted=False,
         target_label=0,
@@ -46,7 +46,7 @@ def __init__(
         self.val_percent = val_percent
         self.label_flipping = label_flipping
         self.data_poisoning = data_poisoning
-        self.poisoned_persent = poisoned_persent
+        self.poisoned_percent = poisoned_percent
         self.poisoned_ratio = poisoned_ratio
         self.targeted = targeted
         self.target_label = target_label
@@ -70,7 +70,7 @@ def setup(self, stage=None):
             self.train_set_indices,
             label_flipping=self.label_flipping,
             data_poisoning=self.data_poisoning,
-            poisoned_persent=self.poisoned_persent,
+            poisoned_percent=self.poisoned_percent,
             poisoned_ratio=self.poisoned_ratio,
             targeted=self.targeted,
             target_label=self.target_label,

nebula/node.py (+3 / -3)

@@ -65,7 +65,7 @@ async def main(config):
     additional_node_round = config.participant["mobility_args"]["additional_node"]["round_start"]
 
     attacks = config.participant["adversarial_args"]["attacks"]
-    poisoned_persent = config.participant["adversarial_args"]["poisoned_sample_percent"]
+    poisoned_percent = config.participant["adversarial_args"]["poisoned_sample_percent"]
     poisoned_ratio = config.participant["adversarial_args"]["poisoned_ratio"]
     targeted = str(config.participant["adversarial_args"]["targeted"])
     target_label = config.participant["adversarial_args"]["target_label"]
@@ -96,7 +96,7 @@ async def main(config):
         label_flipping = False
         data_poisoning = False
         targeted = False
-        poisoned_persent = 0
+        poisoned_percent = 0
         poisoned_ratio = 0
 
     # Adjust the total number of nodes and the index of the current node for CFL, as it doesn't require a specific partition for the server (not used for training)
@@ -261,7 +261,7 @@ async def main(config):
         batch_size=dataset.batch_size,
         label_flipping=label_flipping,
         data_poisoning=data_poisoning,
-        poisoned_persent=poisoned_persent,
+        poisoned_percent=poisoned_percent,
         poisoned_ratio=poisoned_ratio,
         targeted=targeted,
         target_label=target_label,
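The adversarial settings read at the top of main() come from the participant configuration. The sketch below shows only the shape implied by the lookups above; the values and the attack name are placeholders, not options confirmed by this commit.

# Placeholder shape of config.participant["adversarial_args"] (values are illustrative).
participant = {
    "adversarial_args": {
        "attacks": "Label Flipping",      # which attack to run; accepted values are not shown in this commit
        "poisoned_sample_percent": 30,    # read into poisoned_percent after this rename
        "poisoned_ratio": 0.5,
        "targeted": False,                # node.py wraps this in str(...), so a bool or a string both work
        "target_label": 3,
    }
}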
