diff --git a/README.md b/README.md
index ab8ec9d..2b9dacc 100644
--- a/README.md
+++ b/README.md
@@ -26,18 +26,18 @@ For a lack of a better name, this repository calls these algorithms:
 The following describes how to use this repository in your own implementation.
 
 ## The set function
-To use this in your own code, your function to be maximized should be contained in an object of a class inheriting from `AbstractObjectiveFunction`. This class looks as follows:
+To use this in your own code, your function to be maximized should be contained in an object of a class inheriting from `AbstractSubmodularFunction`. This class looks as follows:
 ``` Python
-class AbstractObjectiveFunction:
+class AbstractSubmodularFunction:
     def evaluate(self, input_set: Set[E]) -> float:
         raise NotImplementedError('Abstract Method')
 ```
-That is, `AbstractObjectiveFunction` requires its subclasses to implement an `evaluate()` method, taking as input a `Set[E]` and resulting in a `float`. This method should evaluate the set function on the given set, returning the value of the function. This class corresponds to the *'value oracle'*, which should be able to return the value of the function to be maximixed for every possible subset of the *ground set*.
+That is, `AbstractSubmodularFunction` requires its subclasses to implement an `evaluate()` method that takes a `Set[E]` as input and returns a `float`: the value of the set function on the given set. This class corresponds to the *'value oracle'*, which should be able to return the value of the function to be maximized for every possible subset of the *ground set*.
 
-Typically, your own class inheriting `AbstractObjectiveFunction` can contain instance variables for parameters required by the objective function.
+Typically, your own class inheriting from `AbstractSubmodularFunction` can contain instance variables for parameters required by the objective function.
 
 ## The Optimizers
-Every included optimizer inherits the class `AbstractOptimizer`. Each optimizer should be iniitialized with at least two arguments:
+Every included optimizer inherits from the class `AbstractOptimizer`. Each optimizer should be initialized with at least two arguments:
 1. the objective function to be optimized
 2. the ground set of items. The optimizers will search over the power set of this ground set.
 
@@ -45,8 +45,8 @@ The following shows the definition of the `AbstractOptimizer` class:
 ``` Python
 class AbstractOptimizer:
-    def __init__(self, objective_function: AbstractObjectiveFunction, ground_set: Set[E], debug: bool = True):
-        self.objective_function: AbstractObjectiveFunction = objective_function
+    def __init__(self, objective_function: AbstractSubmodularFunction, ground_set: Set[E], debug: bool = True):
+        self.objective_function: AbstractSubmodularFunction = objective_function
         self.ground_set: Set[E] = ground_set
         self.debug: bool = debug
 
@@ -93,7 +93,7 @@ Some good references for submodular maximization
 >
 > Buchbinder, N., & Feldman, M. (2019). Submodular Functions Maximization Problems. Handbook of Approximation Algorithms and Metaheuristics, Second Edition, 753–788. https://doi.org/10.1201/9781351236423-42
 
-Andreas Krause and Carlos Guestrin maintain a [great website about submodular optimization and the submodularity property](https://las.inf.ethz.ch/submodularity/)
+Andreas Krause and Carlos Guestrin maintain a [great website about submodular optimization and the submodularity property](https://las.inf.ethz.ch/submodularity/), linking to their [Matlab/Octave toolbox for Submodular Function Optimization](https://las.inf.ethz.ch/sfo/index.html).
 
 Jan Vondrak hosts the [slides for some great presentations he did about submodular functions on his website.](https://theory.stanford.edu/~jvondrak/presentations.html)
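To make the renamed API concrete, here is a minimal sketch of a value oracle written against the new class name. The `CoverageMinusCost` class and its toy data are hypothetical illustrations, not part of this repository; the import path is assumed from the file layout shown in the diffs below.

``` Python
from typing import Dict, Set, TypeVar

from submodmax.abstract_optimizer import AbstractSubmodularFunction

E = TypeVar('E')


class CoverageMinusCost(AbstractSubmodularFunction):
    """Hypothetical value oracle: number of targets covered by the chosen
    elements, minus a per-element cost (submodular, non-monotone)."""

    def __init__(self, covers: Dict[str, Set[int]], cost_per_element: float):
        # Parameters of the objective live as instance variables, as the README suggests.
        self.covers = covers
        self.cost_per_element = cost_per_element

    def evaluate(self, input_set: Set[E]) -> float:
        covered: Set[int] = set()
        for element in input_set:
            covered |= self.covers[element]
        return float(len(covered)) - self.cost_per_element * len(input_set)


objective = CoverageMinusCost({'a': {1, 2, 3}, 'b': {3, 4}, 'c': {4, 5}}, cost_per_element=0.5)
print(objective.evaluate({'a'}))       # 3 targets covered - 0.5 = 2.5
print(objective.evaluate({'a', 'b'}))  # 4 targets covered - 1.0 = 3.0
```

Coverage functions are submodular, and subtracting a modular cost keeps them submodular while making them non-monotone, which is exactly the setting the double greedy optimizers below target.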
diff --git a/submodmax/abstract_optimizer.py b/submodmax/abstract_optimizer.py
index 2751e0b..09278f5 100644
--- a/submodmax/abstract_optimizer.py
+++ b/submodmax/abstract_optimizer.py
@@ -3,14 +3,14 @@
 E = TypeVar('E')
 
 
-class AbstractObjectiveFunction:
+class AbstractSubmodularFunction:
     def evaluate(self, input_set: Set[E]) -> float:
         raise NotImplementedError('Abstract Method')
 
 
 class AbstractOptimizer:
-    def __init__(self, objective_function: AbstractObjectiveFunction, ground_set: Set[E], debug: bool = True):
-        self.objective_function: AbstractObjectiveFunction = objective_function
+    def __init__(self, objective_function: AbstractSubmodularFunction, ground_set: Set[E], debug: bool = True):
+        self.objective_function: AbstractSubmodularFunction = objective_function
         self.ground_set: Set[E] = ground_set
         self.debug: bool = debug

diff --git a/submodmax/deterministic_double_greedy_search.py b/submodmax/deterministic_double_greedy_search.py
index 91e1ca0..7bf3036 100644
--- a/submodmax/deterministic_double_greedy_search.py
+++ b/submodmax/deterministic_double_greedy_search.py
@@ -1,6 +1,6 @@
 from typing import Set, TypeVar
 
-from .abstract_optimizer import AbstractOptimizer, AbstractObjectiveFunction
+from .abstract_optimizer import AbstractOptimizer, AbstractSubmodularFunction
 
 E = TypeVar('E')
 
@@ -23,7 +23,7 @@ class DeterministicDoubleGreedySearch(AbstractOptimizer):
     """
 
-    def __init__(self, objective_function: AbstractObjectiveFunction, ground_set: Set[E], debug: bool = True):
+    def __init__(self, objective_function: AbstractSubmodularFunction, ground_set: Set[E], debug: bool = True):
         super().__init__(objective_function, ground_set, debug)
 
     def optimize(self) -> Set[E]:

diff --git a/submodmax/deterministic_local_search.py b/submodmax/deterministic_local_search.py
index d3c2c78..7143201 100644
--- a/submodmax/deterministic_local_search.py
+++ b/submodmax/deterministic_local_search.py
@@ -1,6 +1,6 @@
 from typing import Set, Tuple, Optional, TypeVar
 
-from .abstract_optimizer import AbstractOptimizer, AbstractObjectiveFunction
+from .abstract_optimizer import AbstractOptimizer, AbstractSubmodularFunction
 
 E = TypeVar('E')
 
@@ -27,7 +27,7 @@ class DeterministicLocalSearch(AbstractOptimizer):
     FOCS paper: https://people.csail.mit.edu/mirrokni/focs07.pdf (page 4-5)
     """
 
-    def __init__(self, objective_function: AbstractObjectiveFunction, ground_set: Set[E], epsilon: float = 0.05,
+    def __init__(self, objective_function: AbstractSubmodularFunction, ground_set: Set[E], epsilon: float = 0.05,
                  debug: bool = True):
         super().__init__(objective_function, ground_set, debug)
         self.epsilon: float = epsilon

diff --git a/submodmax/deterministic_local_search_pyids.py b/submodmax/deterministic_local_search_pyids.py
index 2b1f687..d8a68e2 100644
--- a/submodmax/deterministic_local_search_pyids.py
+++ b/submodmax/deterministic_local_search_pyids.py
@@ -1,6 +1,6 @@
 from typing import Set, Tuple, Optional, TypeVar
 
-from .abstract_optimizer import AbstractOptimizer, AbstractObjectiveFunction
+from .abstract_optimizer import AbstractOptimizer, AbstractSubmodularFunction
 
 E = TypeVar('E')
 
@@ -28,7 +28,7 @@ class DeterministicLocalSearchPyIDS(AbstractOptimizer):
     This implementation is largely based on the one from Jiri Filip and Tomas Kliegr included in PyIDS.
     """
 
-    def __init__(self, objective_function: AbstractObjectiveFunction, ground_set: Set[E], epsilon=0.05,
+    def __init__(self, objective_function: AbstractSubmodularFunction, ground_set: Set[E], epsilon=0.05,
                  debug: bool = True):
         super().__init__(objective_function, ground_set, debug)
         self.epsilon: float = epsilon
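The base-class diff above is the hub of the rename: every optimizer keeps the same `(objective_function, ground_set, debug)` constructor, so they stay interchangeable behind `AbstractOptimizer`. For illustration, a sketch of driving two of the double greedy variants against a toy objective; the `CutSizeFunction` class is hypothetical, and the module paths are assumed from the file paths in this diff.

``` Python
from typing import List, Set, Tuple

from submodmax.abstract_optimizer import AbstractSubmodularFunction
from submodmax.deterministic_double_greedy_search import DeterministicDoubleGreedySearch
from submodmax.randomized_double_greedy_search import RandomizedDoubleGreedySearch


class CutSizeFunction(AbstractSubmodularFunction):
    """Hypothetical toy objective: the number of graph edges cut by a vertex set.
    Cut functions are a classic non-monotone submodular family."""

    def __init__(self, edges: List[Tuple[int, int]]):
        self.edges = edges

    def evaluate(self, input_set: Set[int]) -> float:
        # An edge is cut when exactly one of its endpoints is inside the set.
        return float(sum(1 for u, v in self.edges if (u in input_set) != (v in input_set)))


objective = CutSizeFunction(edges=[(1, 2), (2, 3), (3, 4), (4, 1), (1, 3)])
ground_set = {1, 2, 3, 4}

# Identical constructor signatures make the optimizers drop-in replacements for each other.
for optimizer in (DeterministicDoubleGreedySearch(objective, ground_set, debug=False),
                  RandomizedDoubleGreedySearch(objective, ground_set, debug=False)):
    best_set: Set[int] = optimizer.optimize()
    print(type(optimizer).__name__, sorted(best_set), objective.evaluate(best_set))
```

If these classes implement the standard double greedy algorithms their names suggest, the deterministic variant guarantees a 1/3-approximation and the randomized one 1/2 in expectation (Buchbinder, Feldman, Naor & Schwartz).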
diff --git a/submodmax/randomized_double_greedy_search.py b/submodmax/randomized_double_greedy_search.py
index 16a8bd4..2c622e5 100644
--- a/submodmax/randomized_double_greedy_search.py
+++ b/submodmax/randomized_double_greedy_search.py
@@ -3,7 +3,7 @@
 
 import numpy as np
 
-from .abstract_optimizer import AbstractOptimizer, AbstractObjectiveFunction
+from .abstract_optimizer import AbstractOptimizer, AbstractSubmodularFunction
 
 E = TypeVar('E')
 
@@ -30,7 +30,7 @@ class RandomizedDoubleGreedySearch(AbstractOptimizer):
     """
 
-    def __init__(self, objective_function: AbstractObjectiveFunction, ground_set: Set[E], debug: bool = True):
+    def __init__(self, objective_function: AbstractSubmodularFunction, ground_set: Set[E], debug: bool = True):
         super().__init__(objective_function, ground_set, debug)
 
     def optimize(self) -> Set[E]:

diff --git a/submodmax/smooth_local_search.py b/submodmax/smooth_local_search.py
index 3023e26..3f32a48 100644
--- a/submodmax/smooth_local_search.py
+++ b/submodmax/smooth_local_search.py
@@ -4,7 +4,7 @@
 
 import numpy as np
 
-from .abstract_optimizer import AbstractOptimizer, AbstractObjectiveFunction
+from .abstract_optimizer import AbstractOptimizer, AbstractSubmodularFunction
 from .random_set import sample_a_set_with_bias_delta_on_A
 
 E = TypeVar('E')
 
@@ -27,7 +27,7 @@ class SmoothLocalSearch(AbstractOptimizer):
     Note: the problem of maximizing a submodular function is NP-hard.
     """
 
-    def __init__(self, objective_function: AbstractObjectiveFunction, ground_set: Set[E], debug=True):
+    def __init__(self, objective_function: AbstractSubmodularFunction, ground_set: Set[E], debug=True):
         super().__init__(objective_function, ground_set, debug)
 
         self.empty_set: Set[E] = set()

diff --git a/submodmax/smooth_local_search_pyids.py b/submodmax/smooth_local_search_pyids.py
index 89d89b7..73f2b2c 100644
--- a/submodmax/smooth_local_search_pyids.py
+++ b/submodmax/smooth_local_search_pyids.py
@@ -4,7 +4,7 @@
 
 import numpy as np
 
-from .abstract_optimizer import AbstractOptimizer, AbstractObjectiveFunction
+from .abstract_optimizer import AbstractOptimizer, AbstractSubmodularFunction
 from .random_set import sample_a_set_with_bias_delta_on_A, RandomSetOptimizer
 
 E = TypeVar('E')
 
@@ -29,7 +29,7 @@ class SmoothLocalSearchPyIDS(AbstractOptimizer):
     This implementation is largely based on the one from Jiri Filip and Tomas Kliegr included in PyIDS.
     """
 
-    def __init__(self, objective_function: AbstractObjectiveFunction, ground_set: Set[E], debug: bool = True):
+    def __init__(self, objective_function: AbstractSubmodularFunction, ground_set: Set[E], debug: bool = True):
         super().__init__(objective_function, ground_set, debug)
 
         self.rs_optimizer = RandomSetOptimizer(ground_set)
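All of the renamed optimizers assume the objective handed to them is submodular. As a closing aside, a randomized spot check of the diminishing-returns characterization, f(A ∪ {x}) − f(A) ≥ f(B ∪ {x}) − f(B) for A ⊆ B and x ∉ B, can catch a non-submodular `evaluate()` before it silently voids the approximation guarantees. The helper below is a hypothetical sketch, not part of the repository.

``` Python
import random
from typing import Callable, Set, TypeVar

E = TypeVar('E')


def check_diminishing_returns(evaluate: Callable[[Set[E]], float],
                              ground_set: Set[E],
                              trials: int = 1000) -> bool:
    """Randomized check of f(A | x) - f(A) >= f(B | x) - f(B) for A subset of B, x not in B."""
    elements = list(ground_set)
    for _ in range(trials):
        b = {e for e in elements if random.random() < 0.5}
        a = {e for e in b if random.random() < 0.5}  # A is a random subset of B
        outside = [e for e in elements if e not in b]
        if not outside:
            continue
        x = random.choice(outside)
        gain_a = evaluate(a | {x}) - evaluate(a)
        gain_b = evaluate(b | {x}) - evaluate(b)
        if gain_a < gain_b - 1e-9:  # small tolerance for floating-point noise
            return False
    return True
```

A function that passes many trials is not thereby proven submodular, but a single failing triple (A, B, x) is a definite counterexample.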