From ea81059c2594990cd67ab1d865573b9fec690251 Mon Sep 17 00:00:00 2001 From: "Ankur Sinha (Ankur Sinha Gmail)" Date: Thu, 2 Nov 2023 15:07:58 +0000 Subject: [PATCH] chore: correct escape sequences --- inspyred/benchmarks.py | 635 +++++++++++++++++++++-------------------- 1 file changed, 318 insertions(+), 317 deletions(-) diff --git a/inspyred/benchmarks.py b/inspyred/benchmarks.py index a902081..49cc081 100644 --- a/inspyred/benchmarks.py +++ b/inspyred/benchmarks.py @@ -2,7 +2,7 @@ ===================================================== :mod:`benchmarks` -- Benchmark optimization functions ===================================================== - + This module provides a set of benchmark problems for global optimization. .. Copyright 2012 Aaron Garrett @@ -23,8 +23,8 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. - + THE SOFTWARE. + .. module:: benchmarks .. moduleauthor:: Aaron Garrett """ @@ -37,59 +37,60 @@ import itertools import math import random +from functools import reduce class Benchmark(object): """Defines a global optimization benchmark problem. - + This abstract class defines the basic structure of a global - optimization problem. Subclasses should implement the ``generator`` - and ``evaluator`` methods for a particular optimization problem, - which can be used with inspyred's evolutionary computations. - + optimization problem. Subclasses should implement the ``generator`` + and ``evaluator`` methods for a particular optimization problem, + which can be used with inspyred's evolutionary computations. + In addition to being used with evolutionary computations, subclasses - of this class are also callable. The arguments passed to such a call - are combined into a list and passed as the single candidate to the + of this class are also callable. The arguments passed to such a call + are combined into a list and passed as the single candidate to the evaluator method. The single calculated fitness is returned. 
What this means is that a given benchmark can act as a mathematical function that takes arguments and returns the value of the function, like the following example.:: - + my_function = benchmarks.Ackley(2) output = my_function(-1.5, 4.2) - + Public Attributes: - + - *dimensions* -- the number of inputs to the problem - *objectives* -- the number of outputs of the problem (default 1) - *bounder* -- the bounding function for the problem (default None) - - *maximize* -- whether the problem is one of maximization (default + - *maximize* -- whether the problem is one of maximization (default True) - + """ def __init__(self, dimensions, objectives=1): self.dimensions = dimensions self.objectives = objectives self.bounder = None self.maximize = True - + def __str__(self): if self.objectives > 1: return '{0} ({1} dimensions, {2} objectives)'.format(self.__class__.__name__, self.dimensions, self.objectives) else: return '{0} ({1} dimensions)'.format(self.__class__.__name__, self.dimensions) - + def __repr__(self): return self.__class__.__name__ - + def generator(self, random, args): """The generator function for the benchmark problem.""" raise NotImplementedError - + def evaluator(self, candidates, args): """The evaluator function for the benchmark problem.""" raise NotImplementedError - + def __call__(self, *args, **kwargs): candidate = [a for a in args] fit = self.evaluator([candidate], kwargs) @@ -97,30 +98,30 @@ def __call__(self, *args, **kwargs): class Binary(Benchmark): """Defines a binary problem based on an existing benchmark problem. - + This class can be used to modify an existing benchmark problem to allow it to use a binary representation. The generator creates a list of binary values of size `dimensions`-by-`dimension_bits`. The evaluator accepts a candidate represented by such a binary list and transforms that candidate into a real-valued list as follows: - + 1. Each set of `dimension_bits` bits is converted to its positive - integer representation. - 2. Next, that integer value is divided by the maximum integer that - can be represented by `dimension_bits` bits to produce a real - number in the range [0, 1]. - 3. That real number is then scaled to the range [lower_bound, - upper_bound] for that dimension (which should be defined by + integer representation. + 2. Next, that integer value is divided by the maximum integer that + can be represented by `dimension_bits` bits to produce a real + number in the range [0, 1]. + 3. That real number is then scaled to the range [lower_bound, + upper_bound] for that dimension (which should be defined by the bounder). 
- + Public Attributes: - + - *benchmark* -- the original benchmark problem - *dimension_bits* -- the number of bits to use to represent each dimension - - *bounder* -- a bounder that restricts elements of candidate solutions to + - *bounder* -- a bounder that restricts elements of candidate solutions to the range [0, 1] - *maximize* -- whether the underlying benchmark problem is one of maximization - + """ def __init__(self, benchmark, dimension_bits): Benchmark.__init__(self, benchmark.dimensions, benchmark.objectives) @@ -129,7 +130,7 @@ def __init__(self, benchmark, dimension_bits): self.bounder = ec.DiscreteBounder([0, 1]) self.maximize = self.benchmark.maximize self.__class__.__name__ = self.__class__.__name__ + ' ' + self.benchmark.__class__.__name__ - + def _binary_to_real(self, binary): real = [] for d, lo, hi in zip(range(self.dimensions), self.benchmark.bounder.lower_bound, self.benchmark.bounder.upper_bound): @@ -138,98 +139,98 @@ def _binary_to_real(self, binary): value = real_val / (2**(self.dimension_bits)-1) * (hi - lo) + lo real.append(value) return real - + def generator(self, random, args): return [random.choice([0, 1]) for _ in range(self.dimensions * self.dimension_bits)] - + def evaluator(self, candidates, args): real_candidates = [] for c in candidates: real_candidates.append(self._binary_to_real(c)) return self.benchmark.evaluator(real_candidates, args) - - + + #----------------------------------------------------------------------- # SINGLE-OBJECTIVE PROBLEMS #----------------------------------------------------------------------- class Ackley(Benchmark): """Defines the Ackley benchmark problem. - - This class defines the Ackley global optimization problem. This + + This class defines the Ackley global optimization problem. This is a multimodal minimization problem defined as follows: - + .. math:: - - f(x) = -20e^{-0.2\sqrt{\\frac{1}{n} \sum_{i=1}^n x_i^2}} - e^{\\frac{1}{n} \sum_{i=1}^n \cos(2 \pi x_i)} + 20 + e - - Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-32, 32]` for :math:`i=1,...,n`. - + + f(x) = -20e^{-0.2\\sqrt{\\frac{1}{n} \\sum_{i=1}^n x_i^2}} - e^{\\frac{1}{n} \\sum_{i=1}^n \\cos(2 \\pi x_i)} + 20 + e + + Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-32, 32]` for :math:`i=1,...,n`. + .. figure:: _static/image6011.jpg :alt: Ackley function :align: center - - Two-dimensional Ackley function + + Two-dimensional Ackley function (`image source `__) - + Public Attributes: - + - *global_optimum* -- the problem input that produces the optimum output. Here, this corresponds to [0, 0, ..., 0]. - + """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self.bounder = ec.Bounder([-32.0] * self.dimensions, [32.0] * self.dimensions) self.maximize = False self.global_optimum = [0 for _ in range(self.dimensions)] - + def generator(self, random, args): return [random.uniform(-32.0, 32.0) for _ in range(self.dimensions)] - + def evaluator(self, candidates, args): fitness = [] for c in candidates: - fitness.append(-20 * math.exp(-0.2 * math.sqrt(1.0 / self.dimensions * sum([x**2 for x in c]))) - + fitness.append(-20 * math.exp(-0.2 * math.sqrt(1.0 / self.dimensions * sum([x**2 for x in c]))) - math.exp(1.0 / self.dimensions * sum([math.cos(2 * math.pi * x) for x in c])) + 20 + math.e) return fitness class Griewank(Benchmark): """Defines the Griewank benchmark problem. - - This class defines the Griewank global optimization problem. 
This - is a highly multimodal minimization problem with numerous, wide-spread, + + This class defines the Griewank global optimization problem. This + is a highly multimodal minimization problem with numerous, wide-spread, regularly distributed local minima. It is defined as follows: - + .. math:: - - f(x) = \\frac{1}{4000} \sum_{i=1}^n x_i^2 - \prod_{i=1}^n \cos(\\frac{x_i}{\sqrt{i}}) + 1 - - Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-600, 600]` for :math:`i=1,...,n`. - + + f(x) = \\frac{1}{4000} \\sum_{i=1}^n x_i^2 - \\prod_{i=1}^n \\cos(\\frac{x_i}{\\sqrt{i}}) + 1 + + Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-600, 600]` for :math:`i=1,...,n`. + .. figure:: _static/image8891.jpg :alt: Griewank function :align: center - - Two-dimensional Griewank function + + Two-dimensional Griewank function (`image source `__) - + Public Attributes: - + - *global_optimum* -- the problem input that produces the optimum output. Here, this corresponds to [0, 0, ..., 0]. - + """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self.bounder = ec.Bounder([-600.0] * self.dimensions, [600.0] * self.dimensions) self.maximize = False self.global_optimum = [0 for _ in range(self.dimensions)] - + def generator(self, random, args): return [random.uniform(-600.0, 600.0) for _ in range(self.dimensions)] - + def evaluator(self, candidates, args): fitness = [] for c in candidates: @@ -241,39 +242,39 @@ def evaluator(self, candidates, args): class Rastrigin(Benchmark): """Defines the Rastrigin benchmark problem. - - This class defines the Rastrigin global optimization problem. This + + This class defines the Rastrigin global optimization problem. This is a highly multimodal minimization problem where the local minima are regularly distributed. It is defined as follows: - + .. math:: - - f(x) = \sum_{i=1}^n (x_i^2 - 10\cos(2\pi x_i) + 10) - - Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-5.12, 5.12]` for :math:`i=1,...,n`. - + + f(x) = \\sum_{i=1}^n (x_i^2 - 10\\cos(2\\pi x_i) + 10) + + Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5.12, 5.12]` for :math:`i=1,...,n`. + .. figure:: _static/image12271.jpg :alt: Rastrigin function :align: center - - Two-dimensional Rastrigin function + + Two-dimensional Rastrigin function (`image source `__) - + Public Attributes: - + - *global_optimum* -- the problem input that produces the optimum output. Here, this corresponds to [0, 0, ..., 0]. - + """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self.bounder = ec.Bounder([-5.12] * self.dimensions, [5.12] * self.dimensions) self.maximize = False self.global_optimum = [0 for _ in range(self.dimensions)] - + def generator(self, random, args): return [random.uniform(-5.12, 5.12) for _ in range(self.dimensions)] - + def evaluator(self, candidates, args): fitness = [] for c in candidates: @@ -282,40 +283,40 @@ def evaluator(self, candidates, args): class Rosenbrock(Benchmark): """Defines the Rosenbrock benchmark problem. - - This class defines the Rosenbrock global optimization problem, - also known as the "banana function." The global optimum sits - within a narrow, parabolic-shaped flattened valley. It is + + This class defines the Rosenbrock global optimization problem, + also known as the "banana function." The global optimum sits + within a narrow, parabolic-shaped flattened valley. It is defined as follows: - + .. 
math:: - - f(x) = \sum_{i=1}^{n-1} [100(x_i^2 - x_{i+1})^2 + (x_i - 1)^2] - - Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-5, 10]` for :math:`i=1,...,n`. - + + f(x) = \\sum_{i=1}^{n-1} [100(x_i^2 - x_{i+1})^2 + (x_i - 1)^2] + + Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 10]` for :math:`i=1,...,n`. + .. figure:: _static/image12371.jpg :alt: Rosenbrock function :align: center - - Two-dimensional Rosenbrock function + + Two-dimensional Rosenbrock function (`image source `__) - + Public Attributes: - + - *global_optimum* -- the problem input that produces the optimum output. Here, this corresponds to [1, 1, ..., 1]. - + """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self.bounder = ec.Bounder([-5.0] * self.dimensions, [10.0] * self.dimensions) self.maximize = False self.global_optimum = [1 for _ in range(self.dimensions)] - + def generator(self, random, args): return [random.uniform(-5.0, 10.0) for _ in range(self.dimensions)] - + def evaluator(self, candidates, args): fitness = [] for c in candidates: @@ -324,115 +325,115 @@ def evaluator(self, candidates, args): total += 100 * (c[i]**2 - c[i+1])**2 + (c[i] - 1)**2 fitness.append(total) return fitness - + class Schwefel(Benchmark): """Defines the Schwefel benchmark problem. - - This class defines the Schwefel global optimization problem. + + This class defines the Schwefel global optimization problem. It is defined as follows: - + .. math:: - - f(x) = 418.9829n - \sum_{i=1}^n \\left[-x_i \sin(\sqrt{|x_i|})\\right] - - Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-500, 500]` for :math:`i=1,...,n`. - + + f(x) = 418.9829n - \\sum_{i=1}^n \\left[-x_i \\sin(\\sqrt{|x_i|})\\right] + + Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-500, 500]` for :math:`i=1,...,n`. + .. figure:: _static/image12721.jpg :alt: Schwefel function :align: center - - Two-dimensional Schwefel function + + Two-dimensional Schwefel function (`image source `__) - + Public Attributes: - + - *global_optimum* -- the problem input that produces the optimum output. Here, this corresponds to [420.9687, 420.9687, ..., 420.9687]. - + """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self.bounder = ec.Bounder([-500.0] * self.dimensions, [500.0] * self.dimensions) self.maximize = False self.global_optimum = [420.9687 for _ in range(self.dimensions)] - + def generator(self, random, args): return [random.uniform(-500.0, 500.0) for _ in range(self.dimensions)] - + def evaluator(self, candidates, args): fitness = [] for c in candidates: fitness.append(418.9829 * self.dimensions - sum([x * math.sin(math.sqrt(abs(x))) for x in c])) return fitness - + class Sphere(Benchmark): """Defines the Sphere benchmark problem. - + This class defines the Sphere global optimization problem, also called the "first function of De Jong's" or "De Jong's F1." It is continuous, convex, and unimodal, and it is defined as follows: - + .. math:: - - f(x) = \sum_{i=1}^n x_i^2 - - Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-5.12, 5.12]` for :math:`i=1,...,n`. - + + f(x) = \\sum_{i=1}^n x_i^2 + + Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5.12, 5.12]` for :math:`i=1,...,n`. + .. 
figure:: _static/image11981.jpg :alt: Sphere function :align: center - - Two-dimensional Sphere function + + Two-dimensional Sphere function (`image source `__) - + Public Attributes: - + - *global_optimum* -- the problem input that produces the optimum output. Here, this corresponds to [0, 0, ..., 0]. - + """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self.bounder = ec.Bounder([-5.12] * self.dimensions, [5.12] * self.dimensions) self.maximize = False self.global_optimum = [0 for _ in range(self.dimensions)] - + def generator(self, random, args): return [random.uniform(-5.12, 5.12) for _ in range(self.dimensions)] - + def evaluator(self, candidates, args): fitness = [] for c in candidates: fitness.append(sum([x**2 for x in c])) return fitness - - + + #----------------------------------------------------------------------- # MULTI-OBJECTIVE PROBLEMS #----------------------------------------------------------------------- class Kursawe(Benchmark): """Defines the Kursawe multiobjective benchmark problem. - - This class defines the Kursawe multiobjective minimization problem. - This function accepts an n-dimensional input and produces a + + This class defines the Kursawe multiobjective minimization problem. + This function accepts an n-dimensional input and produces a two-dimensional output. It is defined as follows: - + .. math:: - - f_1(x) &= \sum_{i=1}^{n-1} \\left[-10e^{-0.2\sqrt{x_i^2 + x_{i+1}^2}}\\right] \\\\ - f_2(x) &= \sum_{i=1}^n \\left[|x_i|^{0.8} + 5\sin(x_i)^3\\right] \\\\ - - Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-5, 5]` for :math:`i=1,...,n`. - + + f_1(x) &= \\sum_{i=1}^{n-1} \\left[-10e^{-0.2\\sqrt{x_i^2 + x_{i+1}^2}}\\right] \\\\ + f_2(x) &= \\sum_{i=1}^n \\left[|x_i|^{0.8} + 5\\sin(x_i)^3\\right] \\\\ + + Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 5]` for :math:`i=1,...,n`. + .. figure:: _static/kursawefun.jpg :alt: Kursawe Pareto front :width: 700 px :align: center - - Three-dimensional Kursawe Pareto front + + Three-dimensional Kursawe Pareto front (`image source `__) - + """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions, 2) @@ -441,7 +442,7 @@ def __init__(self, dimensions=2): def generator(self, random, args): return [random.uniform(-5.0, 5.0) for _ in range(self.dimensions)] - + def evaluator(self, candidates, args): fitness = [] for c in candidates: @@ -452,36 +453,36 @@ def evaluator(self, candidates, args): class DTLZ1(Benchmark): """Defines the DTLZ1 multiobjective benchmark problem. - + This class defines the DTLZ1 multiobjective minimization problem taken from `(Deb et al., "Scalable Multi-Objective Optimization Test Problems." CEC 2002, pp. 825--830) `__. This function accepts an n-dimensional input and produces an m-dimensional output. It is defined as follows: - + .. 
math:: - + f_1(\\vec{x}) &= \\frac{1}{2} x_1 \\dots x_{m-1}(1 + g(\\vec{x_m})) \\\\ f_i(\\vec{x}) &= \\frac{1}{2} x_1 \\dots x_{m-i}(1 + g(\\vec{x_m})) \\\\ f_m(\\vec{x}) &= \\frac{1}{2} (1 - x_1)(1 + g(\\vec{x_m})) \\\\ - g(\\vec{x_m}) &= 100\\left[|\\vec{x_m}| + \sum_{x_i \in \\vec{x_m}}\\left((x_i - 0.5)^2 - \cos(20\pi(x_i - 0.5))\\right)\\right] \\\\ - + g(\\vec{x_m}) &= 100\\left[|\\vec{x_m}| + \\sum_{x_i \\in \\vec{x_m}}\\left((x_i - 0.5)^2 - \\cos(20\\pi(x_i - 0.5))\\right)\\right] \\\\ + Here, :math:`n` represents the number of dimensions, :math:`m` represents the - number of objectives, :math:`x_i \in [0, 1]` for :math:`i=1,...,n`, and + number of objectives, :math:`x_i \\in [0, 1]` for :math:`i=1,...,n`, and :math:`\\vec{x_m} = x_m x_{m+1} \\dots x_{n}.` - + The recommendation given in the paper mentioned above is to provide 4 more dimensions than objectives. For instance, if we want to use 2 objectives, we should use 6 dimensions. - + .. figure:: _static/dtlz1funb.jpg :alt: DTLZ1 Pareto front :width: 700 px :align: center - - Three-dimensional DTLZ1 Pareto front + + Three-dimensional DTLZ1 Pareto front (`image source `__) - + """ def __init__(self, dimensions=2, objectives=2): Benchmark.__init__(self, dimensions, objectives) @@ -489,23 +490,23 @@ def __init__(self, dimensions=2, objectives=2): raise ValueError('dimensions ({0}) must be greater than or equal to objectives ({1})'.format(dimensions, objectives)) self.bounder = ec.Bounder([0.0] * self.dimensions, [1.0] * self.dimensions) self.maximize = False - + def global_optimum(self): """Return a globally optimal solution to this problem. - - This function returns a globally optimal solution (i.e., a + + This function returns a globally optimal solution (i.e., a solution that lives on the Pareto front). Since there are many - solutions that are Pareto-optimal, this function randomly + solutions that are Pareto-optimal, this function randomly chooses one to return. - + """ x = [random.uniform(0, 1) for _ in range(self.objectives - 1)] x.extend([0 for _ in range(self.dimensions - self.objectives + 1)]) return x - + def generator(self, random, args): return [random.uniform(0.0, 1.0) for _ in range(self.dimensions)] - + def evaluator(self, candidates, args): fitness = [] g = lambda x: 100 * (len(x) + sum([(a - 0.5)**2 - math.cos(20 * math.pi * (a - 0.5)) for a in x])) @@ -519,28 +520,28 @@ def evaluator(self, candidates, args): class DTLZ2(Benchmark): """Defines the DTLZ2 multiobjective benchmark problem. - + This class defines the DTLZ2 multiobjective minimization problem taken from `(Deb et al., "Scalable Multi-Objective Optimization Test Problems." CEC 2002, pp. 825--830) `__. This function accepts an n-dimensional input and produces an m-dimensional output. It is defined as follows: - + .. 
math:: - - f_1(\\vec{x}) &= (1 + g(\\vec{x_m}))\cos(x_1 \pi/2)\cos(x_2 \pi/2)\\dots\cos(x_{m-2} \pi/2)\cos(x_{m-1} \pi/2) \\\\ - f_i(\\vec{x}) &= (1 + g(\\vec{x_m}))\cos(x_1 \pi/2)\cos(x_2 \pi/2)\\dots\cos(x_{m-i} \pi/2)\sin(x_{m-i+1} \pi/2) \\\\ - f_m(\\vec{x}) &= (1 + g(\\vec{x_m}))\sin(x_1 \pi/2) \\\\ - g(\\vec{x_m}) &= \sum_{x_i \in \\vec{x_m}}(x_i - 0.5)^2 \\\\ - + + f_1(\\vec{x}) &= (1 + g(\\vec{x_m}))\\cos(x_1 \\pi/2)\\cos(x_2 \\pi/2)\\dots\\cos(x_{m-2} \\pi/2)\\cos(x_{m-1} \\pi/2) \\\\ + f_i(\\vec{x}) &= (1 + g(\\vec{x_m}))\\cos(x_1 \\pi/2)\\cos(x_2 \\pi/2)\\dots\\cos(x_{m-i} \\pi/2)\\sin(x_{m-i+1} \\pi/2) \\\\ + f_m(\\vec{x}) &= (1 + g(\\vec{x_m}))\\sin(x_1 \\pi/2) \\\\ + g(\\vec{x_m}) &= \\sum_{x_i \\in \\vec{x_m}}(x_i - 0.5)^2 \\\\ + Here, :math:`n` represents the number of dimensions, :math:`m` represents the - number of objectives, :math:`x_i \in [0, 1]` for :math:`i=1,...,n`, and + number of objectives, :math:`x_i \\in [0, 1]` for :math:`i=1,...,n`, and :math:`\\vec{x_m} = x_m x_{m+1} \\dots x_{n}.` - + The recommendation given in the paper mentioned above is to provide 9 more dimensions than objectives. For instance, if we want to use 2 objectives, we should use 11 dimensions. - + """ def __init__(self, dimensions=2, objectives=2): Benchmark.__init__(self, dimensions, objectives) @@ -548,32 +549,32 @@ def __init__(self, dimensions=2, objectives=2): raise ValueError('dimensions ({0}) must be greater than or equal to objectives ({1})'.format(dimensions, objectives)) self.bounder = ec.Bounder([0.0] * self.dimensions, [1.0] * self.dimensions) self.maximize = False - + def global_optimum(self): """Return a globally optimal solution to this problem. - - This function returns a globally optimal solution (i.e., a + + This function returns a globally optimal solution (i.e., a solution that lives on the Pareto front). Since there are many - solutions that are Pareto-optimal, this function randomly + solutions that are Pareto-optimal, this function randomly chooses one to return. - + """ x = [random.uniform(0, 1) for _ in range(self.objectives - 1)] x.extend([0.5 for _ in range(self.dimensions - self.objectives + 1)]) return x - + def generator(self, random, args): return [random.uniform(0.0, 1.0) for _ in range(self.dimensions)] - + def evaluator(self, candidates, args): fitness = [] g = lambda x: sum([(a - 0.5)**2 for a in x]) for c in candidates: gval = g(c[self.objectives-1:]) - fit = [(1 + gval) * + fit = [(1 + gval) * reduce(lambda x,y: x*y, [math.cos(a * math.pi / 2.0) for a in c[:self.objectives-1]])] for m in reversed(range(1, self.objectives)): - fit.append((1 + gval) * + fit.append((1 + gval) * reduce(lambda x,y: x*y, [math.cos(a * math.pi / 2.0) for a in c[:m-1]], 1) * math.sin(c[m-1] * math.pi / 2.0)) fitness.append(emo.Pareto(fit)) @@ -581,28 +582,28 @@ def evaluator(self, candidates, args): class DTLZ3(Benchmark): """Defines the DTLZ3 multiobjective benchmark problem. - + This class defines the DTLZ3 multiobjective minimization problem taken from `(Deb et al., "Scalable Multi-Objective Optimization Test Problems." CEC 2002, pp. 825--830) `__. This function accepts an n-dimensional input and produces an m-dimensional output. It is defined as follows: - + .. 
math:: - - f_1(\\vec{x}) &= (1 + g(\\vec{x_m}))\cos(x_1 \pi/2)\cos(x_2 \pi/2)\\dots\cos(x_{m-2} \pi/2)\cos(x_{m-1} \pi/2) \\\\ - f_i(\\vec{x}) &= (1 + g(\\vec{x_m}))\cos(x_1 \pi/2)\cos(x_2 \pi/2)\\dots\cos(x_{m-i} \pi/2)\sin(x_{m-i+1} \pi/2) \\\\ - f_m(\\vec{x}) &= (1 + g(\\vec{x_m}))\sin(x_1 \pi/2) \\\\ - g(\\vec{x_m}) &= 100\\left[|\\vec{x_m}| + \sum_{x_i \in \\vec{x_m}}\\left((x_i - 0.5)^2 - \cos(20\pi(x_i - 0.5))\\right)\\right] \\\\ - + + f_1(\\vec{x}) &= (1 + g(\\vec{x_m}))\\cos(x_1 \\pi/2)\\cos(x_2 \\pi/2)\\dots\\cos(x_{m-2} \\pi/2)\\cos(x_{m-1} \\pi/2) \\\\ + f_i(\\vec{x}) &= (1 + g(\\vec{x_m}))\\cos(x_1 \\pi/2)\\cos(x_2 \\pi/2)\\dots\\cos(x_{m-i} \\pi/2)\\sin(x_{m-i+1} \\pi/2) \\\\ + f_m(\\vec{x}) &= (1 + g(\\vec{x_m}))\\sin(x_1 \\pi/2) \\\\ + g(\\vec{x_m}) &= 100\\left[|\\vec{x_m}| + \\sum_{x_i \\in \\vec{x_m}}\\left((x_i - 0.5)^2 - \\cos(20\\pi(x_i - 0.5))\\right)\\right] \\\\ + Here, :math:`n` represents the number of dimensions, :math:`m` represents the - number of objectives, :math:`x_i \in [0, 1]` for :math:`i=1,...,n`, and + number of objectives, :math:`x_i \\in [0, 1]` for :math:`i=1,...,n`, and :math:`\\vec{x_m} = x_m x_{m+1} \\dots x_{n}.` - + The recommendation given in the paper mentioned above is to provide 9 more dimensions than objectives. For instance, if we want to use 2 objectives, we should use 11 dimensions. - + """ def __init__(self, dimensions=2, objectives=2): Benchmark.__init__(self, dimensions, objectives) @@ -610,23 +611,23 @@ def __init__(self, dimensions=2, objectives=2): raise ValueError('dimensions ({0}) must be greater than or equal to objectives ({1})'.format(dimensions, objectives)) self.bounder = ec.Bounder([0.0] * self.dimensions, [1.0] * self.dimensions) self.maximize = False - + def global_optimum(self): """Return a globally optimal solution to this problem. - - This function returns a globally optimal solution (i.e., a + + This function returns a globally optimal solution (i.e., a solution that lives on the Pareto front). Since there are many - solutions that are Pareto-optimal, this function randomly + solutions that are Pareto-optimal, this function randomly chooses one to return. - + """ x = [random.uniform(0, 1) for _ in range(self.objectives - 1)] x.extend([0.5 for _ in range(self.dimensions - self.objectives + 1)]) return x - + def generator(self, random, args): return [random.uniform(0.0, 1.0) for _ in range(self.dimensions)] - + def evaluator(self, candidates, args): fitness = [] g = lambda x: 100 * (len(x) + sum([(a - 0.5)**2 - math.cos(20 * math.pi * (a - 0.5)) for a in x])) @@ -634,7 +635,7 @@ def evaluator(self, candidates, args): gval = g(c[self.objectives-1:]) fit = [(1 + gval) * reduce(lambda x,y: x*y, [math.cos(a * math.pi / 2.0) for a in c[:self.objectives-1]])] for m in reversed(range(1, self.objectives)): - fit.append((1 + gval) * + fit.append((1 + gval) * reduce(lambda x,y: x*y, [math.cos(a * math.pi / 2.0) for a in c[:m-1]], 1) * math.sin(c[m-1] * math.pi / 2.0)) fitness.append(emo.Pareto(fit)) @@ -642,28 +643,28 @@ def evaluator(self, candidates, args): class DTLZ4(Benchmark): """Defines the DTLZ4 multiobjective benchmark problem. - + This class defines the DTLZ4 multiobjective minimization problem taken from `(Deb et al., "Scalable Multi-Objective Optimization Test Problems." CEC 2002, pp. 825--830) `__. This function accepts an n-dimensional input and produces an m-dimensional output. It is defined as follows: - + .. 
math:: - - f_1(\\vec{x}) &= (1 + g(\\vec{x_m}))\cos(x_1^\\alpha \pi/2)\cos(x_2^\\alpha \pi/2)\\dots\cos(x_{m-2}^\\alpha \pi/2)\cos(x_{m-1}^\\alpha \pi/2) \\\\ - f_i(\\vec{x}) &= (1 + g(\\vec{x_m}))\cos(x_1^\\alpha \pi/2)\cos(x_2^\\alpha \pi/2)\\dots\cos(x_{m-i}^\\alpha \pi/2)\sin(x_{m-i+1}^\\alpha \pi/2) \\\\ - f_m(\\vec{x}) &= (1 + g(\\vec{x_m}))\sin(x_1^\\alpha \pi/2) \\\\ - g(\\vec{x_m}) &= \sum_{x_i \in \\vec{x_m}}(x_i - 0.5)^2 \\\\ - + + f_1(\\vec{x}) &= (1 + g(\\vec{x_m}))\\cos(x_1^\\alpha \\pi/2)\\cos(x_2^\\alpha \\pi/2)\\dots\\cos(x_{m-2}^\\alpha \\pi/2)\\cos(x_{m-1}^\\alpha \\pi/2) \\\\ + f_i(\\vec{x}) &= (1 + g(\\vec{x_m}))\\cos(x_1^\\alpha \\pi/2)\\cos(x_2^\\alpha \\pi/2)\\dots\\cos(x_{m-i}^\\alpha \\pi/2)\\sin(x_{m-i+1}^\\alpha \\pi/2) \\\\ + f_m(\\vec{x}) &= (1 + g(\\vec{x_m}))\\sin(x_1^\\alpha \\pi/2) \\\\ + g(\\vec{x_m}) &= \\sum_{x_i \\in \\vec{x_m}}(x_i - 0.5)^2 \\\\ + Here, :math:`n` represents the number of dimensions, :math:`m` represents the - number of objectives, :math:`x_i \in [0, 1]` for :math:`i=1,...,n`, + number of objectives, :math:`x_i \\in [0, 1]` for :math:`i=1,...,n`, :math:`\\vec{x_m} = x_m x_{m+1} \\dots x_{n},` and :math:`\\alpha=100.` - + The recommendation given in the paper mentioned above is to provide 9 more dimensions than objectives. For instance, if we want to use 2 objectives, we should use 11 dimensions. - + """ def __init__(self, dimensions=2, objectives=2, alpha=100): Benchmark.__init__(self, dimensions, objectives) @@ -672,32 +673,32 @@ def __init__(self, dimensions=2, objectives=2, alpha=100): self.bounder = ec.Bounder([0.0] * self.dimensions, [1.0] * self.dimensions) self.maximize = False self.alpha = alpha - + def global_optimum(self): """Return a globally optimal solution to this problem. - - This function returns a globally optimal solution (i.e., a + + This function returns a globally optimal solution (i.e., a solution that lives on the Pareto front). Since there are many - solutions that are Pareto-optimal, this function randomly + solutions that are Pareto-optimal, this function randomly chooses one to return. - + """ x = [random.uniform(0, 1) for _ in range(self.objectives - 1)] x.extend([0.5 for _ in range(self.dimensions - self.objectives + 1)]) return x - + def generator(self, random, args): return [random.uniform(0.0, 1.0) for _ in range(self.dimensions)] - + def evaluator(self, candidates, args): fitness = [] g = lambda x: sum([(a - 0.5)**2 for a in x]) for c in candidates: gval = g(c[self.objectives-1:]) - fit = [(1 + gval) * + fit = [(1 + gval) * reduce(lambda x,y: x*y, [math.cos(a**self.alpha * math.pi / 2.0) for a in c[:self.objectives-1]])] for m in reversed(range(1, self.objectives)): - fit.append((1 + gval) * + fit.append((1 + gval) * reduce(lambda x,y: x*y, [math.cos(a**self.alpha * math.pi / 2.0) for a in c[:m-1]], 1) * math.sin(c[m-1]**self.alpha * math.pi / 2.0)) fitness.append(emo.Pareto(fit)) @@ -720,23 +721,23 @@ class DTLZ5(Benchmark): CEC 2002, pp. 825--830) `__. This function accepts an n-dimensional input and produces an m-dimensional output. It is defined as follows: - + .. 
math:: - - f_1(\\vec{x}) &= (1 + g(\\vec{x_m}))\cos(\\theta_1 \pi/2)\cos(\\theta_2 \pi/2)\\dots\cos(\\theta_{m-2} \pi/2)\cos(\\theta_{m-1} \pi/2) \\\\ - f_i(\\vec{x}) &= (1 + g(\\vec{x_m}))\cos(\\theta_1 \pi/2)\cos(\\theta_2 \pi/2)\\dots\cos(\\theta_{m-i} \pi/2)\sin(\\theta_{m-i+1} \pi/2) \\\\ - f_m(\\vec{x}) &= (1 + g(\\vec{x_m}))\sin(\\theta_1 \pi/2) \\\\ - \\theta_i &= \\frac{\pi}{4(1+g(\\vec{x_m}))}(1 + 2g(\\vec{x_m}) x_i) \\\\ - g(\\vec{x_m}) &= \sum_{x_i \in \\vec{x_m}}(x_i - 0.5)^2 \\\\ - + + f_1(\\vec{x}) &= (1 + g(\\vec{x_m}))\\cos(\\theta_1 \\pi/2)\\cos(\\theta_2 \\pi/2)\\dots\\cos(\\theta_{m-2} \\pi/2)\\cos(\\theta_{m-1} \\pi/2) \\\\ + f_i(\\vec{x}) &= (1 + g(\\vec{x_m}))\\cos(\\theta_1 \\pi/2)\\cos(\\theta_2 \\pi/2)\\dots\\cos(\\theta_{m-i} \\pi/2)\\sin(\\theta_{m-i+1} \\pi/2) \\\\ + f_m(\\vec{x}) &= (1 + g(\\vec{x_m}))\\sin(\\theta_1 \\pi/2) \\\\ + \\theta_i &= \\frac{\\pi}{4(1+g(\\vec{x_m}))}(1 + 2g(\\vec{x_m}) x_i) \\\\ + g(\\vec{x_m}) &= \\sum_{x_i \\in \\vec{x_m}}(x_i - 0.5)^2 \\\\ + Here, :math:`n` represents the number of dimensions, :math:`m` represents the - number of objectives, :math:`x_i \in [0, 1]` for :math:`i=1,...,n`, and + number of objectives, :math:`x_i \\in [0, 1]` for :math:`i=1,...,n`, and :math:`\\vec{x_m} = x_m x_{m+1} \\dots x_{n}.` - + The recommendation given in the paper mentioned above is to provide 9 more dimensions than objectives. For instance, if we want to use 2 objectives, we should use 11 dimensions. - + """ def __init__(self, dimensions=2, objectives=2): Benchmark.__init__(self, dimensions, objectives) @@ -744,13 +745,13 @@ def __init__(self, dimensions=2, objectives=2): raise ValueError('dimensions ({0}) must be greater than or equal to objectives ({1})'.format(dimensions, objectives)) self.bounder = ec.Bounder([0.0] * self.dimensions, [1.0] * self.dimensions) self.maximize = False - + def global_optimum(self): """Return a globally optimal solution to this problem. - This function returns a globally optimal solution (i.e., a + This function returns a globally optimal solution (i.e., a solution that lives on the Pareto front). Since there are many - solutions that are Pareto-optimal, this function randomly + solutions that are Pareto-optimal, this function randomly chooses one to return. .. warning:: @@ -760,7 +761,7 @@ def global_optimum(self): Huband, Simon, et al. "A review of multiobjective test problems and a scalable test problem toolkit." Evolutionary Computation, IEEE Transactions on 10.5 (2006): 477-506. - + """ if self.objectives >= 4: warnings.warn( @@ -769,10 +770,10 @@ def global_optimum(self): x = [random.uniform(0, 1) for _ in range(self.objectives - 1)] x.extend([0.5 for _ in range(self.dimensions - self.objectives + 1)]) return x - + def generator(self, random, args): return [random.uniform(0.0, 1.0) for _ in range(self.dimensions)] - + def evaluator(self, candidates, args): fitness = [] g = lambda x: sum([(a - 0.5)**2 for a in x]) @@ -793,29 +794,29 @@ def evaluator(self, candidates, args): class DTLZ6(Benchmark): """Defines the DTLZ6 multiobjective benchmark problem. - + This class defines the DTLZ6 multiobjective minimization problem taken from `(Deb et al., "Scalable Multi-Objective Optimization Test Problems." CEC 2002, pp. 825--830) `__. This function accepts an n-dimensional input and produces an m-dimensional output. It is defined as follows: - + .. 
math:: - - f_1(\\vec{x}) &= (1 + g(\\vec{x_m}))\cos(\\theta_1 \pi/2)\cos(\\theta_2 \pi/2)\\dots\cos(\\theta_{m-2} \pi/2)\cos(\\theta_{m-1} \pi/2) \\\\ - f_i(\\vec{x}) &= (1 + g(\\vec{x_m}))\cos(\\theta_1 \pi/2)\cos(\\theta_2 \pi/2)\\dots\cos(\\theta_{m-i} \pi/2)\sin(\\theta_{m-i+1} \pi/2) \\\\ - f_m(\\vec{x}) &= (1 + g(\\vec{x_m}))\sin(\\theta_1 \pi/2) \\\\ - \\theta_i &= \\frac{\pi}{4(1+g(\\vec{x_m}))}(1 + 2g(\\vec{x_m}) x_i) \\\\ - g(\\vec{x_m}) &= \sum_{x_i \in \\vec{x_m}}x_i^{0.1} \\\\ - + + f_1(\\vec{x}) &= (1 + g(\\vec{x_m}))\\cos(\\theta_1 \\pi/2)\\cos(\\theta_2 \\pi/2)\\dots\\cos(\\theta_{m-2} \\pi/2)\\cos(\\theta_{m-1} \\pi/2) \\\\ + f_i(\\vec{x}) &= (1 + g(\\vec{x_m}))\\cos(\\theta_1 \\pi/2)\\cos(\\theta_2 \\pi/2)\\dots\\cos(\\theta_{m-i} \\pi/2)\\sin(\\theta_{m-i+1} \\pi/2) \\\\ + f_m(\\vec{x}) &= (1 + g(\\vec{x_m}))\\sin(\\theta_1 \\pi/2) \\\\ + \\theta_i &= \\frac{\\pi}{4(1+g(\\vec{x_m}))}(1 + 2g(\\vec{x_m}) x_i) \\\\ + g(\\vec{x_m}) &= \\sum_{x_i \\in \\vec{x_m}}x_i^{0.1} \\\\ + Here, :math:`n` represents the number of dimensions, :math:`m` represents the - number of objectives, :math:`x_i \in [0, 1]` for :math:`i=1,...,n`, and + number of objectives, :math:`x_i \\in [0, 1]` for :math:`i=1,...,n`, and :math:`\\vec{x_m} = x_m x_{m+1} \\dots x_{n}.` - + The recommendation given in the paper mentioned above is to provide 9 more dimensions than objectives. For instance, if we want to use 2 objectives, we should use 11 dimensions. - + """ def __init__(self, dimensions=2, objectives=2): Benchmark.__init__(self, dimensions, objectives) @@ -823,23 +824,23 @@ def __init__(self, dimensions=2, objectives=2): raise ValueError('dimensions ({0}) must be greater than or equal to objectives ({1})'.format(dimensions, objectives)) self.bounder = ec.Bounder([0.0] * self.dimensions, [1.0] * self.dimensions) self.maximize = False - + def global_optimum(self): """Return a globally optimal solution to this problem. - - This function returns a globally optimal solution (i.e., a + + This function returns a globally optimal solution (i.e., a solution that lives on the Pareto front). Since there are many - solutions that are Pareto-optimal, this function randomly + solutions that are Pareto-optimal, this function randomly chooses one to return. - + """ x = [random.uniform(0, 1) for _ in range(self.objectives - 1)] x.extend([0 for _ in range(self.dimensions - self.objectives + 1)]) return x - + def generator(self, random, args): return [random.uniform(0.0, 1.0) for _ in range(self.dimensions)] - + def evaluator(self, candidates, args): fitness = [] g = lambda x: sum([a**0.1 for a in x]) @@ -860,37 +861,37 @@ def evaluator(self, candidates, args): class DTLZ7(Benchmark): """Defines the DTLZ7 multiobjective benchmark problem. - + This class defines the DTLZ7 multiobjective minimization problem taken from `(Deb et al., "Scalable Multi-Objective Optimization Test Problems." CEC 2002, pp. 825--830) `__. This function accepts an n-dimensional input and produces an m-dimensional output. It is defined as follows: - + .. 
math:: - + f_1(\\vec{x}) &= x_1 \\\\ f_i(\\vec{x}) &= x_i \\\\ f_m(\\vec{x}) &= (1 + g(\\vec{x_m}))h(f_1, f_2, \\dots, f_{m-1}, g) \\\\ - g(\\vec{x_m}) &= 1 + \\frac{9}{|\\vec{x_m}|}\sum_{x_i \in \\vec{x_m}}x_i \\\\ - h(f_1, f_2, \\dots, f_{m-1}, g) &= m - \sum_{i=1}^{m-1}\\left[\\frac{f_1}{1+g}(1 + \sin(3\pi f_i))\\right] \\\\ - + g(\\vec{x_m}) &= 1 + \\frac{9}{|\\vec{x_m}|}\\sum_{x_i \\in \\vec{x_m}}x_i \\\\ + h(f_1, f_2, \\dots, f_{m-1}, g) &= m - \\sum_{i=1}^{m-1}\\left[\\frac{f_1}{1+g}(1 + \\sin(3\\pi f_i))\\right] \\\\ + Here, :math:`n` represents the number of dimensions, :math:`m` represents the - number of objectives, :math:`x_i \in [0, 1]` for :math:`i=1,...,n`, and + number of objectives, :math:`x_i \\in [0, 1]` for :math:`i=1,...,n`, and :math:`\\vec{x_m} = x_m x_{m+1} \\dots x_{n}.` - + The recommendation given in the paper mentioned above is to provide 19 more dimensions than objectives. For instance, if we want to use 2 objectives, we should use 21 dimensions. - + .. figure:: _static/dtlz7funb.jpg :alt: DTLZ7 Pareto front :width: 700 px :align: center - - Three-dimensional DTLZ7 Pareto front + + Three-dimensional DTLZ7 Pareto front (`image source `__) - + """ def __init__(self, dimensions=2, objectives=2): Benchmark.__init__(self, dimensions, objectives) @@ -898,23 +899,23 @@ def __init__(self, dimensions=2, objectives=2): raise ValueError('dimensions ({0}) must be greater than or equal to objectives ({1})'.format(dimensions, objectives)) self.bounder = ec.Bounder([0.0] * self.dimensions, [1.0] * self.dimensions) self.maximize = False - + def global_optimum(self): """Return a globally optimal solution to this problem. - - This function returns a globally optimal solution (i.e., a + + This function returns a globally optimal solution (i.e., a solution that lives on the Pareto front). Since there are many - solutions that are Pareto-optimal, this function randomly + solutions that are Pareto-optimal, this function randomly chooses one to return. - + """ x = [random.uniform(0, 1) for _ in range(self.objectives - 1)] x.extend([0 for _ in range(self.dimensions - self.objectives + 1)]) return x - + def generator(self, random, args): return [random.uniform(0.0, 1.0) for _ in range(self.dimensions)] - + def evaluator(self, candidates, args): fitness = [] g = lambda x: 1 + 9.0 / len(x) * sum([a for a in x]) @@ -934,39 +935,39 @@ def evaluator(self, candidates, args): class TSP(Benchmark): """Defines the Traveling Salesman benchmark problem. - + This class defines the Traveling Salesman problem: given a set of locations and their pairwise distances, find the shortest route that - visits each location once and only once. This problem assumes that - the ``weights`` parameter is an *n*-by-*n* list of pairwise - distances among *n* locations. This problem is treated as a - maximization problem, so fitness values are determined to be the + visits each location once and only once. This problem assumes that + the ``weights`` parameter is an *n*-by-*n* list of pairwise + distances among *n* locations. This problem is treated as a + maximization problem, so fitness values are determined to be the reciprocal of the total path length. - + In the case of typical evolutionary computation, a candidate solution - is represented as a permutation of the *n*-element list of the integers + is represented as a permutation of the *n*-element list of the integers from 0 to *n*-1. 
In the case of ant colony optimization, a candidate solution is represented by a list of ``TrailComponent`` objects which have (source, destination) tuples as their elements and the reciprocal - of the weight from source to destination as their values. - - If evolutionary computation is to be used, then the ``generator`` - function should be used to create candidates. If ant colony - optimization is used, then the ``constructor`` function creates - candidates. The ``evaluator`` function performs the evaluation for + of the weight from source to destination as their values. + + If evolutionary computation is to be used, then the ``generator`` + function should be used to create candidates. If ant colony + optimization is used, then the ``constructor`` function creates + candidates. The ``evaluator`` function performs the evaluation for both types of candidates. - + Public Attributes: - - - *weights* -- the two-dimensional list of pairwise distances + + - *weights* -- the two-dimensional list of pairwise distances - *components* -- the set of ``TrailComponent`` objects constructed from the ``weights`` attribute, where the element is the (source, - destination) tuple and the value is the reciprocal of its + destination) tuple and the value is the reciprocal of its ``weights`` entry - *bias* -- the bias in selecting the component of maximum desirability - when constructing a candidate solution for ant colony optimization + when constructing a candidate solution for ant colony optimization (default 0.5) - + """ def __init__(self, weights): Benchmark.__init__(self, len(weights)) @@ -982,7 +983,7 @@ def generator(self, random, args): locations = [i for i in range(len(self.weights))] random.shuffle(locations) return locations - + def constructor(self, random, args): """Return a candidate solution for an ant colony optimization.""" self._use_ants = True @@ -1012,7 +1013,7 @@ def constructor(self, random, args): next_component = selectors.fitness_proportionate_selection(random, feasible_components, {'num_selected': 1})[0] candidate.append(next_component) return candidate - + def evaluator(self, candidates, args): """Return the fitness values for the given candidates.""" fitness = [] @@ -1034,38 +1035,38 @@ def evaluator(self, candidates, args): class Knapsack(Benchmark): """Defines the Knapsack benchmark problem. - + This class defines the Knapsack problem: given a set of items, each with a weight and a value, find the set of items of maximal value - that fit within a knapsack of fixed weight capacity. This problem - assumes that the ``items`` parameter is a list of (weight, value) + that fit within a knapsack of fixed weight capacity. This problem + assumes that the ``items`` parameter is a list of (weight, value) tuples. This problem is most easily defined as a maximization problem, where the total value contained in the knapsack is to be maximized. However, for the evolutionary computation (which may create infeasible solutions that exceed the knapsack capacity), the fitness is either the total value in the knapsack (for feasible solutions) or the - negative difference between the actual contents and the maximum + negative difference between the actual contents and the maximum capacity of the knapsack. - If evolutionary computation is to be used, then the ``generator`` - function should be used to create candidates. If ant colony - optimization is used, then the ``constructor`` function creates - candidates. 
The ``evaluator`` function performs the evaluation for + If evolutionary computation is to be used, then the ``generator`` + function should be used to create candidates. If ant colony + optimization is used, then the ``constructor`` function creates + candidates. The ``evaluator`` function performs the evaluation for both types of candidates. - + Public Attributes: - + - *capacity* -- the weight capacity of the knapsack - *items* -- a list of (weight, value) tuples corresponding to the possible items to be placed into the knapsack - *components* -- the set of ``TrailComponent`` objects constructed from the ``items`` parameter - - *duplicates* -- Boolean value specifying whether items may be + - *duplicates* -- Boolean value specifying whether items may be duplicated in the knapsack (i.e., False corresponds to 0/1 Knapsack) - *bias* -- the bias in selecting the component of maximum desirability - when constructing a candidate solution for ant colony optimization + when constructing a candidate solution for ant colony optimization (default 0.5) - + """ def __init__(self, capacity, items, duplicates=False): Benchmark.__init__(self, len(items)) @@ -1081,7 +1082,7 @@ def __init__(self, capacity, items, duplicates=False): self.bounder = ec.DiscreteBounder([0, 1]) self.maximize = True self._use_ants = False - + def generator(self, random, args): """Return a candidate solution for an evolutionary computation.""" if self.duplicates: @@ -1089,7 +1090,7 @@ def generator(self, random, args): return [random.randint(0, m) for m in max_count] else: return [random.choice([0, 1]) for _ in range(len(self.items))] - + def constructor(self, random, args): """Return a candidate solution for an ant colony optimization.""" self._use_ants = True @@ -1115,7 +1116,7 @@ def constructor(self, random, args): next_component = selectors.fitness_proportionate_selection(random, feasible_components, {'num_selected': 1})[0] candidate.append(next_component) return candidate - + def evaluator(self, candidates, args): """Return the fitness values for the given candidates.""" fitness = []
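
For context, the benchmark classes touched by this patch are meant to be driven by one of inspyred's evolutionary computations. The following sketch is not part of the patch; it shows one way the two-dimensional Ackley problem defined above can be optimized with an evolution strategy from ``inspyred.ec``. The random seed, population size, and evaluation budget are illustrative choices, not values taken from the library::

    from random import Random

    from inspyred import benchmarks, ec

    prng = Random(12345)              # fixed seed so runs are repeatable (illustrative)
    problem = benchmarks.Ackley(2)    # the 2-D Ackley problem defined in this module

    es = ec.ES(prng)                  # a basic evolution strategy
    es.terminator = ec.terminators.evaluation_termination
    final_pop = es.evolve(generator=problem.generator,
                          evaluator=problem.evaluator,
                          bounder=problem.bounder,
                          maximize=problem.maximize,
                          pop_size=100,            # illustrative settings
                          max_evaluations=20000)

    best = max(final_pop)             # individuals compare by fitness
    print(best)

    # As described in the Benchmark docstring, a benchmark instance is also
    # directly callable, e.g. benchmarks.Ackley(2)(-1.5, 4.2) evaluates the
    # Ackley function at that single point.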