diff --git a/manual/v2.0.2/en/.buildinfo b/manual/v2.0.2/en/.buildinfo
new file mode 100644
index 00000000..be53f85e
--- /dev/null
+++ b/manual/v2.0.2/en/.buildinfo
@@ -0,0 +1,4 @@
+# Sphinx build info version 1
+# This file records the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: cad319f0e7bfacb48db2344eb6175754
+tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/manual/v2.0.2/en/_images/notebook_tutorial_basic_15_1.png b/manual/v2.0.2/en/_images/notebook_tutorial_basic_15_1.png
new file mode 100644
index 00000000..2f357d0e
Binary files /dev/null and b/manual/v2.0.2/en/_images/notebook_tutorial_basic_15_1.png differ
diff --git a/manual/v2.0.2/en/_images/notebook_tutorial_basic_16_1.png b/manual/v2.0.2/en/_images/notebook_tutorial_basic_16_1.png
new file mode 100644
index 00000000..4f591a9d
Binary files /dev/null and b/manual/v2.0.2/en/_images/notebook_tutorial_basic_16_1.png differ
diff --git a/manual/v2.0.2/en/_images/notebook_tutorial_basic_25_1.png b/manual/v2.0.2/en/_images/notebook_tutorial_basic_25_1.png
new file mode 100644
index 00000000..a6a80bcb
Binary files /dev/null and b/manual/v2.0.2/en/_images/notebook_tutorial_basic_25_1.png differ
diff --git a/manual/v2.0.2/en/_images/notebook_tutorial_basic_27_1.png b/manual/v2.0.2/en/_images/notebook_tutorial_basic_27_1.png
new file mode 100644
index 00000000..8487778a
Binary files /dev/null and b/manual/v2.0.2/en/_images/notebook_tutorial_basic_27_1.png differ
diff --git a/manual/v2.0.2/en/_images/notebook_tutorial_basic_org_22_1.png b/manual/v2.0.2/en/_images/notebook_tutorial_basic_org_22_1.png
new file mode 100644
index 00000000..f80eca59
Binary files /dev/null and b/manual/v2.0.2/en/_images/notebook_tutorial_basic_org_22_1.png differ
diff --git a/manual/v2.0.2/en/_images/notebook_tutorial_basic_org_23_1.png b/manual/v2.0.2/en/_images/notebook_tutorial_basic_org_23_1.png
new file mode 100644
index 00000000..745455d7
Binary files /dev/null and b/manual/v2.0.2/en/_images/notebook_tutorial_basic_org_23_1.png differ
diff --git a/manual/v2.0.2/en/_images/notebook_tutorial_multi_objective_12_0.png b/manual/v2.0.2/en/_images/notebook_tutorial_multi_objective_12_0.png
new file mode 100644
index 00000000..a13db00e
Binary files /dev/null and b/manual/v2.0.2/en/_images/notebook_tutorial_multi_objective_12_0.png differ
diff --git a/manual/v2.0.2/en/_images/notebook_tutorial_multi_objective_14_0.png b/manual/v2.0.2/en/_images/notebook_tutorial_multi_objective_14_0.png
new file mode 100644
index 00000000..071ddc70
Binary files /dev/null and b/manual/v2.0.2/en/_images/notebook_tutorial_multi_objective_14_0.png differ
diff --git a/manual/v2.0.2/en/_images/notebook_tutorial_multi_objective_28_0.png b/manual/v2.0.2/en/_images/notebook_tutorial_multi_objective_28_0.png
new file mode 100644
index 00000000..08be0187
Binary files /dev/null and b/manual/v2.0.2/en/_images/notebook_tutorial_multi_objective_28_0.png differ
diff --git a/manual/v2.0.2/en/_images/notebook_tutorial_multi_objective_35_0.png b/manual/v2.0.2/en/_images/notebook_tutorial_multi_objective_35_0.png
new file mode 100644
index 00000000..520a8baa
Binary files /dev/null and b/manual/v2.0.2/en/_images/notebook_tutorial_multi_objective_35_0.png differ
diff --git a/manual/v2.0.2/en/_images/notebook_tutorial_multi_objective_41_0.png b/manual/v2.0.2/en/_images/notebook_tutorial_multi_objective_41_0.png
new file mode 100644
index 00000000..5ac1f3b1
Binary files /dev/null and b/manual/v2.0.2/en/_images/notebook_tutorial_multi_objective_41_0.png differ
diff --git a/manual/v2.0.2/en/_images/notebook_tutorial_multi_objective_47_0.png b/manual/v2.0.2/en/_images/notebook_tutorial_multi_objective_47_0.png
new file mode 100644
index 00000000..a9be02f0
Binary files /dev/null and b/manual/v2.0.2/en/_images/notebook_tutorial_multi_objective_47_0.png differ
diff --git a/manual/v2.0.2/en/_images/notebook_tutorial_multi_objective_53_0.png b/manual/v2.0.2/en/_images/notebook_tutorial_multi_objective_53_0.png
new file mode 100644
index 00000000..4ce45ecd
Binary files /dev/null and b/manual/v2.0.2/en/_images/notebook_tutorial_multi_objective_53_0.png differ
diff --git a/manual/v2.0.2/en/_images/notebook_tutorial_multi_probe_11_1.png b/manual/v2.0.2/en/_images/notebook_tutorial_multi_probe_11_1.png
new file mode 100644
index 00000000..0068091e
Binary files /dev/null and b/manual/v2.0.2/en/_images/notebook_tutorial_multi_probe_11_1.png differ
diff --git a/manual/v2.0.2/en/_images/notebook_tutorial_multi_probe_12_1.png b/manual/v2.0.2/en/_images/notebook_tutorial_multi_probe_12_1.png
new file mode 100644
index 00000000..85da5350
Binary files /dev/null and b/manual/v2.0.2/en/_images/notebook_tutorial_multi_probe_12_1.png differ
diff --git a/manual/v2.0.2/en/_images/notebook_tutorial_multi_probe_14_1.png b/manual/v2.0.2/en/_images/notebook_tutorial_multi_probe_14_1.png
new file mode 100644
index 00000000..502aa769
Binary files /dev/null and b/manual/v2.0.2/en/_images/notebook_tutorial_multi_probe_14_1.png differ
diff --git a/manual/v2.0.2/en/_modules/index.html b/manual/v2.0.2/en/_modules/index.html
new file mode 100644
index 00000000..651a8513
--- /dev/null
+++ b/manual/v2.0.2/en/_modules/index.html
@@ -0,0 +1,197 @@
+
+
+
+
+
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+# -*- coding:utf-8 -*-
+importnumpyasnp
+
+
+
+[docs]
+classfourier:
+"""
+ random feature maps
+ ``Psi(X; W,b) = cos[X * Wt + b] * alpha``
+ where
+
+ - X: input, N-by-d matrix
+ - W: weight, l-by-d matrix
+ - Wt: transpose of W
+ - b: bias, 1-by-l matrix
+ - alpha: coefficient
+
+ and
+
+ - N: number of data
+ - d: dimension of input
+ - l: number of basis
+
+ Attributes
+ ==========
+ params: Tuple
+ W, b, alpha
+ nbasis: int
+ number of basis
+
+ References
+ ==========
+ A. Rahimi and B. Recht, "Random features for large-scale kernel machines,"
+ in "Advances in neural information processing systems," 2007, pp. 1177-1184.
+ """
+
+ def__init__(self,params):
+"""
+ Parameters
+ ----------
+ params: Tuple
+ W, b, alpha
+ """
+ self._check_params(params)
+ self._check_len_params(params)
+ self.params=params
+ self.nbasis=self.params[1].shape[0]
+
+
+
+
+ def_check_params(self,params):
+"""
+ Parameters
+ ==========
+ params: tuple
+ W, b, alpha
+
+ Raises
+ ======
+ ValueError
+ if ``params`` is not a 3-dimensional tuple
+ """
+ ifnotisinstance(params,tuple):
+ raiseValueError("The variable < params > must be a tuple.")
+
+ iflen(params)!=3:
+ raiseValueError("The variable < params > must be 3-dimensional tuple.")
+
+ def_check_len_params(self,params):
+"""
+ Parameters
+ ==========
+ params: tuple
+ W, b, alpha
+
+
+ Raises
+ ======
+ ValueError
+ when dim of W and b are mismatch
+ or alpha is not a scalar
+ """
+ ifparams[0].shape[0]!=params[1].shape[0]:
+ raiseValueError(
+ "The length of 0-axis of W must be same as the length of b."
+ )
+
+ ifhasattr(params[2],"__len__"):
+ iflen(params[2])!=1:
+ raiseValueError("The third entry of <params> must be a scalar.")
+ else:
+ ifisinstance(params[2],str):
+ raiseValueError("The third entry of <params> must be a scalar.")
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+from..importinf
+
+
+
+[docs]
+classmodel:
+"""
+ Baysean Linear Model
+
+ Attributes
+ ==========
+ prior: physbo.blm.prior.gauss
+ prior distribution of weights
+ lik: physbo.blm.lik.gauss
+ kernel
+ nbasis: int
+ number of features in random feature map
+ stats: Tuple
+ auxially parameters for sampling
+ method: str
+ sampling method
+ """
+
+ def__init__(self,lik,prior,options={}):
+ self.prior=prior
+ self.lik=lik
+ self.nbasis=self.lik.linear.basis.nbasis
+ self._init_prior(prior)
+ self._set_options(options)
+ self.stats=()
+
+
+[docs]
+ defprepare(self,X,t,Psi=None):
+"""
+ initializes model by using the first training dataset
+
+ Parameters
+ ==========
+ X: numpy.ndarray
+ inputs
+ t: numpy.ndarray
+ target (label)
+ Psi: numpy.ndarray
+ feature maps
+
+ See also
+ ========
+ physbo.blm.inf.exact.prepare
+ """
+ ifself.method=="exact":
+ inf.exact.prepare(blm=self,X=X,t=t,Psi=Psi)
+ else:
+ pass
+
+
+
+[docs]
+ defupdate_stats(self,x,t,psi=None):
+"""
+ updates model by using another training data
+
+ Parameters
+ ==========
+ x: numpy.ndarray
+ input
+ t: float
+ target (label)
+ psi: numpy.ndarray
+ feature map
+
+ See also
+ ========
+ physbo.blm.inf.exact.update_stats
+ """
+ ifself.method=="exact":
+ self.stats=inf.exact.update_stats(self,x,t,psi)
+ else:
+ pass
+
+
+
+[docs]
+ defget_post_params_mean(self):
+"""
+ calculates posterior mean of weights
+
+ Returns
+ =======
+ numpy.ndarray
+
+ See also
+ ========
+ physbo.blm.inf.exact.get_post_params_mean
+ """
+ ifself.method=="exact":
+ self.lik.linear.params=inf.exact.get_post_params_mean(blm=self)
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+importscipy
+
+importphysbo.miscasmisc
+
+
+
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+
+
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+
+
+[docs]
+classlinear:
+"""
+
+ Attributes
+ ==========
+ basis:
+ basis for random feature map
+ nbasis: int
+ number of basis
+ bias:
+ params:
+ _init_params:
+ initial value of the parameter
+ """
+
+ def__init__(self,basis,params=None,bias=None):
+ self.basis=basis
+ self.nbasis=basis.nbasis
+ self._init_params=params
+ self.bias=bias
+ self.params=params
+
+ ifparamsisNone:
+ self.params=np.zeros(self.nbasis)
+ self.nparams=self.nbasis
+
+
+[docs]
+ defget_mean(self,X,Psi=None,params=None,bias=None):
+"""
+ calculate mean values
+
+ Parameters
+ ==========
+ X: numpy.ndarray
+ input as an N-by-d matrix
+ Psi: numpy.ndarray
+ feature maps ``Psi(X)`` as an N-by-l matrix
+ (default: self.get_basis(X))
+ params: numpy.ndarray
+ weight as a vector with size l
+ (default: self.params)
+ bias: float
+ (default: self.bias)
+
+ Returns
+ =======
+ numpy.ndarray
+ Psi * params + bias
+
+ """
+ ifparamsisNone:
+ params=np.copy(self.params)
+
+ ifbiasisNone:
+ bias=np.copy(self.bias)
+
+ ifPsiisNone:
+ Psi=self.get_basis(X)
+
+ returnPsi.dot(params)+bias
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importphysbo.predictor
+
+
+
+[docs]
+ deffit(self,training,num_basis=None):
+"""
+ fit model to training dataset
+
+ Parameters
+ ==========
+ training: physbo.variable
+ dataset for training
+ num_basis: int
+ the number of basis (default: self.config.predict.num_basis)
+ """
+ ifnum_basisisNone:
+ num_basis=self.config.predict.num_basis
+
+ ifself.model.prior.cov.num_dimisNone:
+ self.model.prior.cov.num_dim=training.X.shape[1]
+ self.model.fit(training.X,training.t,self.config)
+ self.blm=self.model.export_blm(num_basis)
+ self.delete_stats()
+
+
+
+[docs]
+ defprepare(self,training):
+"""
+ initializes model by using training data set
+
+ Parameters
+ ==========
+ training: physbo.variable
+ dataset for training
+ """
+ self.blm.prepare(training.X,training.t,training.Z)
+[docs]
+ defget_post_fmean(self,training,test):
+"""
+ calculates posterior mean value of model
+
+ Parameters
+ ==========
+ training: physbo.variable
+ training dataset. If already trained, the model does not use this.
+ test: physbo.variable
+ inputs
+
+ Returns
+ =======
+ numpy.ndarray
+ """
+ ifself.blm.statsisNone:
+ self.prepare(training)
+ returnself.blm.get_post_fmean(test.X,test.Z)
+
+
+
+[docs]
+ defget_post_fcov(self,training,test):
+"""
+ calculates posterior variance-covariance matrix of model
+
+ Parameters
+ ==========
+ training: physbo.variable
+ training dataset. If already trained, the model does not use this.
+ test: physbo.variable
+ inputs
+
+ Returns
+ =======
+ numpy.ndarray
+ """
+ ifself.blm.statsisNone:
+ self.prepare(training)
+ returnself.blm.get_post_fcov(test.X,test.Z)
+
+
+
+[docs]
+ defget_post_params(self,training,test):
+"""
+ calculates posterior weights
+
+ Parameters
+ ==========
+ training: physbo.variable
+ training dataset. If already trained, the model does not use this.
+ test: physbo.variable
+ inputs (not used)
+
+ Returns
+ =======
+ numpy.ndarray
+ """
+ ifself.blm.statsisNone:
+ self.prepare(training)
+ returnself.blm.get_post_params_mean()
+
+
+
+[docs]
+ defget_post_samples(self,training,test,N=1,alpha=1.0):
+"""
+ draws samples of mean values of model
+
+ Parameters
+ ==========
+ training: physbo.variable
+ training dataset. If already trained, the model does not use this.
+ test: physbo.variable
+ inputs
+ N: int
+ number of samples
+ (default: 1)
+ alpha: float
+ noise for sampling source
+ (default: 1.0)
+
+ Returns
+ =======
+ numpy.ndarray
+ """
+ ifself.blm.statsisNone:
+ self.prepare(training)
+ returnself.blm.post_sampling(test.X,Psi=test.Z,N=N,alpha=alpha)
+
+
+
+[docs]
+ defget_predict_samples(self,training,test,N=1):
+"""
+ draws samples of values of model
+
+ Parameters
+ ==========
+ training: physbo.variable
+ training dataset. If already trained, the model does not use this.
+ test: physbo.variable
+ inputs
+ N: int
+ number of samples
+ (default: 1)
+ alpha: float
+ noise for sampling source
+ (default: 1.0)
+
+ Returns
+ =======
+ numpy.ndarray (N x len(test))
+ """
+ ifself.blm.statsisNone:
+ self.prepare(training)
+ returnself.blm.predict_sampling(test.X,Psi=test.Z,N=N).transpose()
+
+
+
+[docs]
+ defupdate(self,training,test):
+"""
+ updates the model.
+
+ If not yet initialized (prepared), the model will be prepared by ``training``.
+ Otherwise, the model will be updated by ``test``.
+
+ Parameters
+ ==========
+ training: physbo.variable
+ training dataset for initialization (preparation).
+ If already prepared, the model ignore this.
+ test: physbo.variable
+ training data for update.
+ If not prepared, the model ignore this.
+ """
+ ifself.model.statsisNone:
+ self.prepare(training)
+ returnNone
+
+ ifhasattr(test.t,"__len__"):
+ N=len(test.t)
+ else:
+ N=1
+
+ ifN==1:
+ iftest.ZisNone:
+ iftest.X.ndim==1:
+ self.blm.update_stats(test.X,test.t)
+ else:
+ self.blm.update_stats(test.X[0,:],test.t)
+ else:
+ iftest.Z.ndim==1:
+ self.blm.update_stats(test.X,test.t,psi=test.Z)
+ else:
+ self.blm.update_stats(test.X[0,:],test.t,psi=test.Z[0,:])
+ else:
+ forninrange(N):
+ iftest.ZisNone:
+ self.blm.update_stats(test.X[n,:],test.t[n])
+ else:
+ self.blm.update_stats(test.X[n,:],test.t[n],psi=test.Z[n,:])
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+
+
+[docs]
+classcov_const:
+"""
+ isotropic variance-covariance
+
+ All elements have the same variance and are independent with each other
+
+ Attributes
+ ==========
+ params: float
+ half of log of covariance
+ sigma2: float
+ covariance
+ prec: float
+ precision (= inv. of covariance)
+ """
+
+ def__init__(self,params=None):
+"""
+ Parameters
+ ==========
+ params: float
+ half of log of covariance
+ (default: numpy.log(1))
+ """
+ ifparamsisNone:
+ self.params=np.log(1)
+ self.sigma2,self.prec=self._trans_params(params)
+
+
+[docs]
+ defget_cov(self,nbasis,params=None):
+"""
+ computes the covariance
+
+ Parameters
+ ==========
+ nbasis: int
+ the number of components
+ params: float
+ half of log of variance
+ (default: self.params)
+
+ Returns
+ =======
+ numpy.ndarray
+ nbasis-by-n-basis covariance matrix
+ """
+ ifparamsisNone:
+ params=self.params
+ sigma2,prec=self._trans_params(params)
+ returnnp.identity(nbasis)*sigma2
+
+
+
+[docs]
+ defget_prec(self,nbasis,params=None):
+"""
+ computes the precision
+
+ Parameters
+ ==========
+ nbasis: int
+ the number of components
+ params: float
+ half of log of variance
+ (default: self.params)
+
+ Returns
+ =======
+ numpy.ndarray
+ nbasis-by-n-basis precision matrix
+ """
+ ifparamsisNone:
+ params=self.params
+ sigma2,prec=self._trans_params(params)
+ returnnp.identity(nbasis)*prec
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+# coding=utf-8
+importnumpyasnp
+importscipy.optimize
+
+
+
+[docs]
+ defrun(self,X,t):
+"""
+ Performing optimization using the L-BFGS-B algorithm
+
+ Parameters
+ ----------
+ X: numpy.ndarray
+ N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
+ t: numpy.ndarray
+ N-dimensional vector that represents the corresponding negative energy of search candidates.
+ Returns
+ -------
+ numpy.ndarray
+ The solution of the optimization.
+ """
+ batch_size=self.config.learning.batch_size
+ sub_X,sub_t=self.gp.sub_sampling(X,t,batch_size)
+
+ ifself.config.learning.num_init_params_search!=0:
+ is_init_params_search=True
+ else:
+ is_init_params_search=False
+
+ ifis_init_params_search:
+ params=self.init_params_search(sub_X,sub_t)
+ else:
+ params=np.copy(self.gp.params)
+
+ params=self.one_run(params,sub_X,sub_t)
+ returnparams
+
+
+
+[docs]
+ defone_run(self,params,X,t,max_iter=None):
+"""
+
+ Parameters
+ ----------
+ params: numpy.ndarray
+ Initial guess for optimization.
+ Array of real elements of size (n,), where ‘n’ is the number of independent variables.
+
+ X: numpy.ndarray
+ N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
+ t: numpy.ndarray
+ N-dimensional vector that represents the corresponding negative energy of search candidates.
+ max_iter: int
+ Maximum number of iterations to perform.
+ Returns
+ -------
+ numpy.ndarray
+ The solution of the optimization.
+ """
+
+ # is_disp: Set to True to print convergence messages.
+ is_disp=True
+
+ ifmax_iterisNone:
+ is_disp=self.config.learning.is_disp
+ max_iter=int(self.config.learning.max_iter)
+
+ args=(X,t)
+ bound=self.gp.get_params_bound()
+ res=scipy.optimize.minimize(
+ fun=self.gp.eval_marlik,
+ args=args,
+ x0=params,
+ method="L-BFGS-B",
+ jac=self.gp.get_grad_marlik,
+ bounds=bound,
+ options={"disp":is_disp,"maxiter":max_iter},
+ )
+
+ returnres.x
+
+
+
+[docs]
+ definit_params_search(self,X,t):
+"""
+
+ Parameters
+ ----------
+ X: numpy.ndarray
+ N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
+ t: numpy.ndarray
+ N-dimensional vector that represents the corresponding negative energy of search candidates.
+
+ Returns
+ -------
+ numpy.ndarray
+ The parameters which give the minimum marginal likelihood.
+ """
+ num_init_params_search=self.config.learning.num_init_params_search
+ max_iter=int(self.config.learning.max_iter_init_params_search)
+ min_params=np.zeros(self.gp.num_params)
+ min_marlik=np.inf
+
+ foriinrange(num_init_params_search):
+ params=self.gp.get_cand_params(X,t)
+ params=self.one_run(params,X,t,max_iter)
+ marlik=self.gp.eval_marlik(params,X,t)
+
+ ifmin_marlik>marlik:
+ min_marlik=marlik
+ min_params=params
+
+ # print 'minimum marginal likelihood = ', min_marlik
+ returnmin_params
+
+
+
+
+
+[docs]
+classonline(object):
+"""
+ base class for online learning
+ """
+
+ def__init__(self,gp,config):
+"""
+
+ Parameters
+ ----------
+ gp : model (gp.core.model)
+ config: set_config (misc.set_config)
+ """
+ self.gp=gp
+ self.config=config
+ self.num_iter=0
+
+
+[docs]
+ defrun(self,X,t):
+"""
+ Run initial search and hyper parameter running.
+
+ Parameters
+ ----------
+ X: numpy.ndarray
+ N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
+ t: numpy.ndarray
+ N-dimensional vector that represents the corresponding negative energy of search candidates.
+
+ Returns
+ -------
+ numpy.ndarray
+ The solution of the optimization.
+
+ """
+ ifself.config.learning.num_init_params_search!=0:
+ is_init_params_search=True
+ else:
+ is_init_params_search=False
+
+ is_disp=self.config.learning.is_disp
+ ifis_init_params_search:
+ ifis_disp:
+ print("Start the initial hyper parameter searching ...")
+ params=self.init_params_search(X,t)
+ ifis_disp:
+ print("Done\n")
+ else:
+ params=np.copy(self.params)
+
+ ifis_disp:
+ print("Start the hyper parameter learning ...")
+ params=self.one_run(params,X,t)
+ ifis_disp:
+ print("Done\n")
+
+ returnparams
+
+
+
+[docs]
+ defone_run(self,params,X,t,max_epoch=None,is_disp=False):
+"""
+
+ Parameters
+ ----------
+ params: numpy.ndarray
+ Parameters for optimization.
+ Array of real elements of size (n,), where ‘n’ is the number of independent variables.
+ X: numpy.ndarray
+ N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
+ t: numpy.ndarray
+ N-dimensional vector that represents the corresponding negative energy of search candidates.
+ max_epoch: int
+ Maximum candidate epochs
+ Returns
+ -------
+ numpy.ndarray
+ The solution of the optimization.
+
+ """
+ num_data=X.shape[0]
+ batch_size=self.config.learning.batch_size
+
+ ifbatch_size>num_data:
+ batch_size=num_data
+
+ ifmax_epochisNone:
+ max_epoch=self.config.learning.max_epoch
+ is_disp=self.config.learning.is_disp
+
+ num_disp=self.config.learning.num_disp
+ eval_size=self.config.learning.eval_size
+ eval_X,eval_t=self.gp.sub_sampling(X,t,eval_size)
+ timing=range(0,max_epoch,int(np.floor(max_epoch/num_disp)))
+ temp=0
+
+ fornum_epochinrange(0,max_epoch):
+ perm=np.random.permutation(num_data)
+
+ ifis_dispandtemp<num_dispandnum_epoch==timing[temp]:
+ self.disp_marlik(params,eval_X,eval_t,num_epoch)
+ temp+=1
+
+ forninrange(0,num_data,batch_size):
+ tmp_index=perm[n:n+batch_size]
+ iflen(tmp_index)==batch_size:
+ self.num_iter+=1
+ subX=X[tmp_index,:]
+ subt=t[tmp_index]
+ params+=self.get_one_update(params,subX,subt)
+
+ ifis_disp:
+ self.disp_marlik(params,eval_X,eval_t,num_epoch+1)
+
+ self.reset()
+ returnparams
+
+
+
+[docs]
+ defdisp_marlik(self,params,eval_X,eval_t,num_epoch=None):
+"""
+ Displaying marginal likelihood
+
+ Parameters
+ ----------
+ params: numpy.ndarray
+ Parameters for optimization.
+ Array of real elements of size (n,), where ‘n’ is the number of independent variables.
+ eval_X: numpy.ndarray
+ N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
+ eval_t: numpy.ndarray
+ N-dimensional vector that represents the corresponding negative energy of search candidates.
+ num_epoch: int
+ Number of epochs
+
+ Returns
+ -------
+
+ """
+ marlik=self.gp.eval_marlik(params,eval_X,eval_t)
+ ifnum_epochisnotNone:
+ print(num_epoch,end=" ")
+ print("-th epoch",end=" ")
+
+ print("marginal likelihood",marlik)
+
+
+
+[docs]
+ definit_params_search(self,X,t):
+"""
+ Initial parameter searchs
+
+ Parameters
+ ----------
+ X: numpy.ndarray
+ N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
+ t: numpy.ndarray
+ N-dimensional vector that represents the corresponding negative energy of search candidates.
+
+ Returns
+ -------
+ numpy.ndarray
+ The parameter which gives the minimum likelihood.
+ """
+ num_init_params_search=self.config.learning.num_init_params_search
+ is_disp=self.config.learning.is_disp
+ max_epoch=self.config.learning.max_epoch_init_params_search
+ eval_size=self.config.learning.eval_size
+ eval_X,eval_t=self.gp.sub_sampling(X,t,eval_size)
+ min_params=np.zeros(self.gp.num_params)
+ min_marlik=np.inf
+
+ foriinrange(num_init_params_search):
+ params=self.gp.get_cand_params(X,t)
+
+ params=self.one_run(params,X,t,max_epoch)
+ marlik=self.gp.eval_marlik(params,eval_X,eval_t)
+
+ ifmin_marlik>marlik:
+ min_marlik=marlik
+ min_params=params
+
+ # print 'minimum marginal likelihood = ', min_marlik
+ returnmin_params
+[docs]
+ defget_one_update(self,params,X,t):
+"""
+
+ Parameters
+ ----------
+ params: numpy.ndarray
+ Parameters for optimization.
+ Array of real elements of size (n,), where ‘n’ is the number of independent variables.
+ X: numpy.ndarray
+ N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
+ t: numpy.ndarray
+ N-dimensional vector that represents the corresponding negative energy of search candidates.
+ Returns
+ -------
+
+ """
+ grad=self.gp.get_grad_marlik(params,X,t)
+ self.m=self.m*self.beta+grad*(1-self.beta)
+ self.v=self.v*self.gamma+grad**2*(1-self.gamma)
+ hat_m=self.m/(1-self.beta**(self.num_iter))
+ hat_v=self.v/(1-self.gamma**(self.num_iter))
+ return-self.alpha*hat_m/(np.sqrt(hat_v)+self.epsilon)
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+fromphysboimportblm
+fromphysbo.gpimportinf
+fromphysbo.gp.coreimportlearning
+fromphysbo.gp.core.priorimportprior
+
+
+
+[docs]
+ defsub_sampling(self,X,t,N):
+"""
+ Make subset for sampling
+
+ Parameters
+ ----------
+ X: numpy.ndarray
+ Each row of X denotes the d-dimensional feature vector of search candidate.
+ t: numpy.ndarray
+ The negative energy of each search candidate (value of the objective function to be optimized).
+ N: int
+ Total number of data in subset
+ Returns
+ -------
+ subX: numpy.ndarray
+ subt: numpy.ndarray
+ """
+ num_data=X.shape[0]
+
+ ifNisnotNoneandN<num_data:
+ index=np.random.permutation(num_data)
+ subX=X[index[0:N],:]
+ subt=t[index[0:N]]
+ else:
+ subX=X
+ subt=t
+ returnsubX,subt
+
+
+
+[docs]
+ defexport_blm(self,num_basis):
+"""
+ Exporting the blm(Baysean linear model) predictor
+
+ Parameters
+ ----------
+ num_basis: int
+ Total number of basis
+ Returns
+ -------
+ physbo.blm.core.model
+ """
+ ifnothasattr(self.prior.cov,"rand_expans"):
+ raiseValueError("The kernel must be.")
+
+ basis_params=self.prior.cov.rand_expans(num_basis)
+ basis=blm.basis.fourier(basis_params)
+ prior=blm.prior.gauss(num_basis)
+ lik=blm.lik.gauss(
+ blm.lik.linear(basis,bias=self.prior.get_mean(1)),
+ blm.lik.cov(self.lik.params),
+ )
+ blr=blm.model(lik,prior)
+
+ returnblr
+
+
+
+[docs]
+ defeval_marlik(self,params,X,t,N=None):
+"""
+ Evaluating marginal likelihood.
+
+ Parameters
+ ----------
+ params: numpy.ndarray
+ Parameters.
+ X: numpy.ndarray
+ N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
+ t: numpy.ndarray
+ N dimensional array.
+ The negative energy of each search candidate (value of the objective function to be optimized).
+ N: int
+ Total number of subset data (if not specified, all dataset is used)
+ Returns
+ -------
+ marlik: float
+ Marginal likelihood.
+ """
+ subX,subt=self.sub_sampling(X,t,N)
+ ifself.inf=="exact":
+ marlik=inf.exact.eval_marlik(self,subX,subt,params=params)
+ else:
+ pass
+
+ returnmarlik
+
+
+
+[docs]
+ defget_grad_marlik(self,params,X,t,N=None):
+"""
+ Evaluating gradiant of marginal likelihood.
+
+ Parameters
+ ----------
+ params: numpy.ndarray
+ Parameters.
+ X: numpy.ndarray
+ N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
+ t: numpy.ndarray
+ N dimensional array.
+ The negative energy of each search candidate (value of the objective function to be optimized).
+ N: int
+ Total number of subset data (if not specified, all dataset is used)
+
+ Returns
+ -------
+ grad_marlik: numpy.ndarray
+ Gradiant of marginal likelihood.
+ """
+ subX,subt=self.sub_sampling(X,t,N)
+ ifself.inf=="exact":
+ grad_marlik=inf.exact.get_grad_marlik(self,subX,subt,params=params)
+ returngrad_marlik
+
+
+
+[docs]
+ defget_params_bound(self):
+"""
+ Getting boundary of the parameters.
+
+ Returns
+ -------
+ bound: list
+ An array with the tuple (min_params, max_params).
+ """
+ ifself.lik.num_params!=0:
+ bound=self.lik.get_params_bound()
+
+ ifself.prior.mean.num_params!=0:
+ bound.extend(self.prior.mean.get_params_bound())
+
+ ifself.prior.cov.num_params!=0:
+ bound.extend(self.prior.cov.get_params_bound())
+ returnbound
+
+
+
+[docs]
+ defprepare(self,X,t,params=None):
+"""
+
+ Parameters
+ ----------
+ X: numpy.ndarray
+ N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
+
+ t: numpy.ndarray
+ N dimensional array.
+ The negative energy of each search candidate (value of the objective function to be optimized).
+ params: numpy.ndarray
+ Parameters.
+ """
+ ifparamsisNone:
+ params=np.copy(self.params)
+ ifself.inf=="exact":
+ self.stats=inf.exact.prepare(self,X,t,params)
+ else:
+ pass
+[docs]
+ defprint_params(self):
+"""
+ Printing parameters
+ """
+ print("\n")
+ ifself.lik.num_params!=0:
+ print("likelihood parameter = ",self.lik.params)
+
+ ifself.prior.mean.num_params!=0:
+ print("mean parameter in GP prior: ",self.prior.mean.params)
+
+ print("covariance parameter in GP prior: ",self.prior.cov.params)
+ print("\n")
+
+
+
+[docs]
+ defget_cand_params(self,X,t):
+"""
+ Getting candidate for parameters
+
+ Parameters
+ ----------
+ X: numpy.ndarray
+ N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
+
+ t: numpy.ndarray
+ N dimensional array.
+ The negative energy of each search candidate (value of the objective function to be optimized).
+ Returns
+ -------
+ params: numpy.ndarray
+ Parameters
+ """
+ params=np.zeros(self.num_params)
+ ifself.lik.num_params!=0:
+ params[0:self.lik.num_params]=self.lik.get_cand_params(t)
+
+ temp=self.lik.num_params
+
+ ifself.prior.mean.num_params!=0:
+ params[
+ temp:temp+self.prior.mean.num_params
+ ]=self.prior.mean.get_cand_params(t)
+
+ temp+=self.prior.mean.num_params
+
+ ifself.prior.cov.num_params!=0:
+ params[temp:]=self.prior.cov.get_cand_params(X,t)
+
+ returnparams
+
+
+
+[docs]
+ deffit(self,X,t,config):
+"""
+ Fitting function (update parameters)
+
+ Parameters
+ ----------
+ X: numpy.ndarray
+ N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
+
+ t: numpy.ndarray
+ N dimensional array.
+ The negative energy of each search candidate (value of the objective function to be optimized).
+ config: physbo.misc.set_config object
+
+ """
+ method=config.learning.method
+
+ ifmethod=="adam":
+ adam=learning.adam(self,config)
+ params=adam.run(X,t)
+
+ ifmethodin("bfgs","batch"):
+ bfgs=learning.batch(self,config)
+ params=bfgs.run(X,t)
+
+ self.set_params(params)
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+importscipy
+
+
+
+[docs]
def decomp_params(self, params):
    """
    Decompose the parameter vector into the mean part and the covariance part.

    Parameters
    ----------
    params: numpy.ndarray
        Parameter vector. If None, a copy of self.params is used.

    Returns
    -------
    mean_params: numpy.ndarray
        First self.mean.num_params entries.
    cov_params: numpy.ndarray
        Remaining entries.
    """
    if params is None:
        params = np.copy(self.params)

    mean_params = params[0:self.mean.num_params]
    cov_params = params[self.mean.num_params:]
    return mean_params, cov_params
+
+
+
+[docs]
def get_mean(self, num_data, params=None):
    """
    Calculate the mean value of the prior.

    Parameters
    ----------
    num_data: int
        Total number of data.
    params: numpy.ndarray
        Parameter vector; only the leading mean parameters are used.
        If None, a copy of self.params is used.

    Returns
    -------
    numpy.ndarray
        Mean vector returned by self.mean.get_mean.
    """
    if params is None:
        params = np.copy(self.params)
    return self.mean.get_mean(num_data, params[0:self.mean.num_params])
+
+
+
+[docs]
def get_cov(self, X, Z=None, params=None, diag=False):
    """
    Calculate the variance-covariance matrix of the prior.

    Parameters
    ----------
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
    Z: numpy.ndarray
        N x d dimensional matrix. Each row of Z denotes the d-dimensional feature vector of tests.
    params: numpy.ndarray
        Parameter vector; only the trailing covariance parameters are used.
        If None, a copy of self.params is used.
    diag: bool
        If True, only the diagonal elements are requested.

    Returns
    -------
    numpy.ndarray
    """
    if params is None:
        params = np.copy(self.params)

    return self.cov.get_cov(X, Z, params=params[self.mean.num_params:], diag=diag)
+
+
+
+[docs]
def get_grad_mean(self, num_data, params=None):
    """
    Calculate the gradient of the prior mean with respect to its parameters.

    Parameters
    ----------
    num_data: int
        Total number of data.
    params: numpy.ndarray
        Parameter vector. If None, a copy of self.params is used.

    Returns
    -------
    numpy.ndarray
    """
    if params is None:
        params = np.copy(self.params)

    mean_params, cov_params = self.decomp_params(params)
    return self.mean.get_grad(num_data, params=mean_params)
+
+
+
+[docs]
def get_grad_cov(self, X, params=None):
    """
    Calculate the gradient of the prior covariance with respect to its parameters.

    Parameters
    ----------
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
    params: numpy.ndarray
        Parameter vector. If None, a copy of self.params is used.

    Returns
    -------
    numpy.ndarray
    """
    if params is None:
        params = np.copy(self.params)
    mean_params, cov_params = self.decomp_params(params)
    return self.cov.get_grad(X, params=cov_params)
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+# -*- coding:utf-8 -*-
+importnumpyasnp
+fromscipyimportspatial
+from._src.enhance_gaussimportgrad_width64
+
+
+
+[docs]
+classgauss:
+"""gaussian kernel"""
+
def __init__(
    self,
    num_dim,
    width=3,
    scale=1,
    ard=False,
    max_width=1e6,
    min_width=1e-6,
    max_scale=1e6,
    min_scale=1e-6,
):
    """
    Gaussian kernel.

    Parameters
    ----------
    num_dim: int
        Dimension of the input feature vectors.
    width: float or numpy.ndarray
        Kernel width (per-dimension array allowed with ARD).
    scale: float
        Kernel scale.
    ard: bool
        Flag to use Automatic Relevance Determination (ARD).
    max_width: float
        Maximum value of width.
    min_width: float
        Minimum value of width.
    max_scale: float
        Maximum value of scale.
    min_scale: float
        Minimum value of scale.
    """
    self.ard = ard
    self.num_dim = num_dim
    self.scale = scale
    # bounds are stored in log space, matching the parameterization
    self.max_ln_width = np.log(max_width)
    self.min_ln_width = np.log(min_width)
    self.max_ln_scale = np.log(max_scale)
    self.min_ln_scale = np.log(min_scale)

    if self.ard:
        # with ARD: one width per dimension plus one scale
        self.num_params = num_dim + 1
        if isinstance(width, np.ndarray) and len(width) == self.num_dim:
            self.width = width
        else:
            self.width = width * np.ones(self.num_dim)
    else:
        # without ARD: a single shared width plus one scale
        self.width = width
        self.num_params = 2

    params = self.cat_params(self.width, self.scale)
    self.set_params(params)
+
+
+[docs]
def print_params(self):
    """Show the current kernel parameters (width, scale, scale**2)."""
    print(" Parameters of Gaussian kernel \n ")
    print(" width = ", self.width)
    print(" scale = ", self.scale)
    print(" scale2 = ", self.scale ** 2)
    print(" \n")
+[docs]
def supp_params(self, params):
    """
    Clip parameters into their allowed ranges.

    All entries except the last are log-widths and are clipped to
    [min_ln_width, max_ln_width]; the last entry is the log-scale and is
    clipped to [min_ln_scale, max_ln_scale].

    Parameters
    ----------
    params: numpy.ndarray
        Parameters for optimization.
        Array of real elements of size (n,), where n is the number of
        independent variables. Modified in place for the width entries.

    Returns
    -------
    params: numpy.ndarray
        The clipped parameter vector.
    """
    index = np.where(params[0:-1] > self.max_ln_width)
    params[index[0]] = self.max_ln_width

    index = np.where(params[0:-1] < self.min_ln_width)
    params[index[0]] = self.min_ln_width

    if params[-1] > self.max_ln_scale:
        params[-1] = self.max_ln_scale

    if params[-1] < self.min_ln_scale:
        params[-1] = self.min_ln_scale

    return params
+
+
+
+[docs]
def decomp_params(self, params):
    """
    Decompose the log-space parameters into width and scale.

    Parameters
    ----------
    params: numpy.ndarray
        Log-space parameters; all but the last entry are log-widths,
        the last entry is the log-scale.

    Returns
    -------
    width: numpy.ndarray
        exp of the width entries.
    scale: float
        exp of the scale entry.
    """
    width = np.exp(params[0:-1])
    scale = np.exp(params[-1])
    return width, scale
+
+
+
+[docs]
def save(self, file_name):
    """
    Save the Gaussian kernel to a file in numpy .npz format.

    Parameters
    ----------
    file_name: str
        File name to save the information of the kernel.
    """
    kwarg = {
        "name": "gauss",
        "params": self.params,
        "ard": self.ard,
        "num_dim": self.num_dim,
        "max_ln_scale": self.max_ln_scale,
        "min_ln_scale": self.min_ln_scale,
        "max_ln_width": self.max_ln_width,
        "min_ln_width": self.min_ln_width,
        "num_params": self.num_params,
    }
    with open(file_name, "wb") as f:
        np.savez(f, **kwarg)
+
+
+
+[docs]
def load(self, file_name):
    """
    Recover the Gaussian kernel from a .npz file written by ``save``.

    Parameters
    ----------
    file_name: str
        File name to load the information of the kernel.
    """
    temp = np.load(file_name)

    self.num_dim = temp["num_dim"]
    self.ard = temp["ard"]
    self.max_ln_scale = temp["max_ln_scale"]
    self.min_ln_scale = temp["min_ln_scale"]
    self.max_ln_width = temp["max_ln_width"]
    self.min_ln_width = temp["min_ln_width"]
    params = temp["params"]
    self.set_params(params)
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+importscipy
+from...importmisc
+fromcopyimportdeepcopy
+
+
+
+[docs]
def eval_marlik(gp, X, t, params=None):
    """
    Evaluate the (negative log) marginal likelihood.

    Parameters
    ----------
    gp: physbo.gp.core.model
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
    t: numpy.ndarray
        N dimensional array.
        The negative energy of each search candidate (value of the objective function to be optimized).
    params: numpy.ndarray
        Parameters.

    Returns
    -------
    marlik: float
        Negative log marginal likelihood
        0.5*N*log(2*pi) + sum(log diag U) + 0.5*alpha.alpha.
    """
    ndata, ndims = X.shape
    lik_params, prior_params = gp.decomp_params(params)

    fmu = gp.prior.get_mean(ndata, params=prior_params)
    G = gp.prior.get_cov(X, params=prior_params)
    B = gp.lik.get_cov(ndata, params=lik_params)

    # small jitter keeps the Cholesky factorization numerically stable
    A = G + B + 1e-8 * np.identity(ndata)
    res = t - fmu
    U = scipy.linalg.cholesky(A, check_finite=False)
    alpha = scipy.linalg.solve_triangular(
        U.transpose(), res, lower=True, overwrite_b=False, check_finite=False
    )
    marlik = (
        0.5 * ndata * np.log(2 * np.pi)
        + np.sum(np.log(np.diag(U)))
        + 0.5 * np.inner(alpha, alpha)
    )
    return marlik
+
+
+
+
+[docs]
def get_grad_marlik(gp, X, t, params=None):
    """
    Evaluate the gradient of the (negative log) marginal likelihood.

    Parameters
    ----------
    gp: physbo.gp.core.model
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
    t: numpy.ndarray
        N dimensional array.
        The negative energy of each search candidate (value of the objective function to be optimized).
    params: numpy.ndarray
        Parameters.

    Returns
    -------
    grad_marlik: numpy.ndarray
        Gradient of the marginal likelihood, ordered as
        [likelihood params, prior-mean params, prior-covariance params].
    """
    ndata, ndims = X.shape
    lik_params, prior_params = gp.decomp_params(params)

    fmu = gp.prior.get_mean(ndata, prior_params)
    G = gp.prior.get_cov(X, params=prior_params)
    B = gp.lik.get_cov(ndata, lik_params)

    A = G + B + 1e-8 * np.identity(ndata)
    U = scipy.linalg.cholesky(A, check_finite=False)
    res = t - fmu
    alpha = misc.gauss_elim(U, res)
    invA = scipy.linalg.inv(A, check_finite=False)

    grad_marlik = np.zeros(gp.num_params)

    # likelihood block of the gradient
    if gp.lik.num_params != 0:
        lik_grad = gp.lik.get_grad(ndata, lik_params)
        temp = lik_grad.dot(alpha)
        grad_marlik[0:gp.lik.num_params] = -0.5 * temp.dot(
            alpha
        ) + 0.5 * misc.traceAB2(invA, lik_grad)

    ntemp = gp.lik.num_params

    # prior-mean block of the gradient
    if gp.prior.mean.num_params != 0:
        mean_grad = gp.prior.get_grad_mean(ndata, prior_params)
        grad_marlik[ntemp:ntemp + gp.prior.mean.num_params] = -np.inner(
            alpha, mean_grad
        )

    ntemp += gp.prior.mean.num_params

    # prior-covariance block of the gradient
    if gp.prior.cov.num_params != 0:
        cov_grad = gp.prior.get_grad_cov(X, prior_params)
        temp = cov_grad.dot(alpha)
        grad_marlik[ntemp:] = -0.5 * temp.dot(alpha) + 0.5 * misc.traceAB3(
            invA, cov_grad
        )

    return grad_marlik
+
+
+
+
+[docs]
def prepare(gp, X, t, params=None):
    """
    Precompute the Cholesky factor and the weight vector for posterior queries.

    Parameters
    ----------
    gp: physbo.gp.core.model
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
    t: numpy.ndarray
        N dimensional array.
        The negative energy of each search candidate (value of the objective function to be optimized).
    params: numpy.ndarray
        Parameters. If None, a copy of gp.params is used.

    Returns
    -------
    stats: tuple
        (U, alpha) where U is the upper Cholesky factor of the
        regularized covariance and alpha = (U^T U)^-1 (t - fmu).
    """
    ndata = X.shape[0]

    if params is None:
        params = np.copy(gp.params)

    lik_params, prior_params = gp.decomp_params(params)

    G = gp.prior.get_cov(X, params=prior_params)
    fmu = gp.prior.get_mean(ndata, params=prior_params)
    B = gp.lik.get_cov(ndata, params=lik_params)
    # small jitter keeps the Cholesky factorization numerically stable
    A = G + B + 1e-8 * np.identity(ndata)
    U = scipy.linalg.cholesky(A, check_finite=False)
    residual = t - fmu
    alpha = misc.gauss_elim(U, residual)
    stats = (U, alpha)

    return stats
+
+
+
+
+[docs]
def get_post_fmean(gp, X, Z, params=None):
    """
    Calculate the mean of the posterior.

    Parameters
    ----------
    gp: physbo.gp.core.model
        Must have ``gp.stats`` prepared (see ``prepare``).
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
    Z: numpy.ndarray
        N x d dimensional matrix. Each row of Z denotes the d-dimensional feature vector of tests.
    params: numpy.ndarray
        Parameters.

    Returns
    -------
    numpy.ndarray
        Posterior mean at the test points: G(Z, X) . alpha + fmu.
    """
    ntest = Z.shape[0]

    lik_params, prior_params = gp.decomp_params(params)

    alpha = gp.stats[1]

    fmu = gp.prior.get_mean(ntest)
    G = gp.prior.get_cov(X=Z, Z=X, params=prior_params)

    return G.dot(alpha) + fmu
+
+
+
+
+[docs]
def get_post_fcov(gp, X, Z, params=None, diag=True):
    """
    Calculate the covariance of the posterior.

    Parameters
    ----------
    gp: physbo.gp.core.model
        Must have ``gp.stats`` prepared (see ``prepare``).
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
    Z: numpy.ndarray
        N x d dimensional matrix. Each row of Z denotes the d-dimensional feature vector of tests.
    params: numpy.ndarray
        Parameters.
    diag: bool
        If True, only the diagonal (variances) is computed.

    Returns
    -------
    numpy.ndarray
        Posterior variances (diag=True) or full posterior covariance matrix.
    """
    lik_params, prior_params = gp.decomp_params(params)

    U = gp.stats[0]
    alpha = gp.stats[1]

    G = gp.prior.get_cov(X=X, Z=Z, params=prior_params)

    invUG = scipy.linalg.solve_triangular(
        U.transpose(), G, lower=True, overwrite_b=False, check_finite=False
    )

    if diag:
        diagK = gp.prior.get_cov(X=Z, params=prior_params, diag=True)
        diag_invUG2 = misc.diagAB(invUG.transpose(), invUG)
        post_cov = diagK - diag_invUG2
    else:
        K = gp.prior.get_cov(X=Z, params=prior_params)
        post_cov = K - np.dot(invUG.transpose(), invUG)

    return post_cov
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+
+
+[docs]
class gauss:
    """Gaussian likelihood function (homoscedastic observation noise)."""

    def __init__(self, std=1, max_params=1e6, min_params=1e-6):
        """
        Parameters
        ----------
        std: numpy.ndarray or float
            Standard deviation.
        max_params: float
            The maximum value of the parameter.
            If the parameter is greater than this value, it will be replaced by this value.
        min_params: float
            The minimum value of the parameter.
            If the parameter is less than this value, it will be replaced by this value.
        """
        # bounds and parameters are stored in log space
        self.min_params = np.log(min_params)
        self.max_params = np.log(max_params)
        self.num_params = 1
        self.std = std
        self.params = np.log(std)
        self.set_params(self.params)

    def supp_params(self, params=None):
        """
        Clip the parameter into [min_params, max_params].

        Parameters
        ----------
        params: numpy.ndarray
            Parameter for optimization. If None, the stored self.params
            is used.

        Returns
        -------
        params: numpy.ndarray or float
            The clipped parameter.
        """
        if params is None:
            # BUG FIX: the original did ``np.copy(params)`` here, which
            # copied None and then failed on comparison; fall back to the
            # stored parameters instead.
            params = np.copy(self.params)

        if params > self.max_params:
            params = self.max_params

        if params < self.min_params:
            params = self.min_params

        return params

    def trans_params(self, params=None):
        """
        Get exp(params), i.e. the standard deviation.

        Parameters
        ----------
        params: numpy.ndarray
            Log-space parameter. If None, a copy of self.params is used.

        Returns
        -------
        std: numpy.ndarray
        """
        if params is None:
            params = np.copy(self.params)

        std = np.exp(params)
        return std

    def get_params_bound(self):
        """
        Get the boundary list for the parameters.

        Returns
        -------
        bound: list
            A num_params-dimensional list of (min_params, max_params) tuples.
        """
        bound = [(self.min_params, self.max_params) for _ in range(0, self.num_params)]
        return bound

    def get_cov(self, num_data, params=None):
        """
        Get the noise covariance matrix.

        Parameters
        ----------
        num_data: int
        params: numpy.ndarray
            Log-space parameter. If None, the stored parameter is used.

        Returns
        -------
        numpy.ndarray
            Diagonal matrix with entries exp(2.0*params).
        """
        std = self.trans_params(params)
        var = std ** 2
        return var * np.identity(num_data)

    def get_grad(self, num_data, params=None):
        """
        Get the gradient of the noise covariance matrix.

        Parameters
        ----------
        num_data: int
        params: numpy.ndarray
            Log-space parameter. If None, the stored parameter is used.

        Returns
        -------
        numpy.ndarray
            Diagonal matrix with entries 2.0 * exp(2.0*params).
        """
        std = self.trans_params(params)
        var = std ** 2
        return var * np.identity(num_data) * 2

    def set_params(self, params):
        """
        Set (and clip) the parameter, updating self.std consistently.

        Parameters
        ----------
        params: numpy.ndarray
            Log-space parameter.
        """
        self.params = self.supp_params(params)
        # derive std from the clipped parameter so that self.std always
        # stays within the configured bounds
        self.std = self.trans_params(self.params)

    def get_cand_params(self, t):
        """
        Get a candidate parameter from data.

        Parameters
        ----------
        t: numpy.ndarray
            N dimensional array. The negative energy of each search candidate (value of the objective function to be optimized).

        Returns
        -------
        numpy.ndarray
            log[standard deviation of t] - log 10.0
        """
        return np.log(np.std(t) / 10)

    # [TODO] Check: get_cand_params seems not to be used.
+
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+
+
+[docs]
+classconst:
+"""constant"""
+
def __init__(self, params=None, max_params=1e12, min_params=-1e12):
    """
    Constant mean function.

    Parameters
    ----------
    params: numpy.ndarray
        Parameters.
    max_params: float
        Threshold value for specifying the maximum value of the parameter.
    min_params: float
        Threshold value for specifying the minimum value of the parameter.
    """
    self.max_params = max_params
    self.min_params = min_params
    self.init_params(params)
    self.num_params = 1
+
+
+[docs]
def get_params_bound(self):
    """
    Get the boundary list for the parameters.

    Returns
    -------
    bound: list
        num_params-dimensional list of (min_params, max_params) tuples.
    """
    bound = [(self.min_params, self.max_params) for _ in range(0, self.num_params)]
    return bound
+
+
+
+[docs]
def get_mean(self, num_data, params=None):
    """
    Return a constant mean vector.

    Parameters
    ----------
    num_data: int
        Total number of data.
    params: numpy.ndarray
        Parameters. If None, a copy of self.params is used.

    Returns
    -------
    numpy.ndarray
        num_data-dimensional vector filled with the parameter value.
    """
    if params is None:
        params = np.copy(self.params)
    return params * np.ones(num_data)
+
+
+
+[docs]
def get_grad(self, num_data, params=None):
    """
    Return a new array of length num_data filled with ones
    (gradient of a constant mean with respect to its parameter).

    Parameters
    ----------
    num_data: int
        Total number of data.
    params: object
        Not used.

    Returns
    -------
    numpy.ndarray
    """
    return np.ones(num_data)
+[docs]
def get_cand_params(self, t):
    """
    Get a candidate parameter: the median of the data.

    Parameters
    ----------
    t: array_like
        Input array or object that can be converted to an array.

    Returns
    -------
    median: numpy.ndarray
        A new array holding the result.
    """
    return np.median(t)
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+
+
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importphysbo.predictor
+
+
+
+[docs]
def fit(self, training, num_basis=None):
    """
    Fit the model to a training dataset.

    Parameters
    ----------
    training: physbo.variable
        Dataset for training.
    num_basis: int
        The number of basis (unused here; kept for interface compatibility).
    """
    if self.model.prior.cov.num_dim is None:
        # lazily infer the input dimension from the training data
        self.model.prior.cov.num_dim = training.X.shape[1]
    self.model.fit(training.X, training.t, self.config)
    self.delete_stats()
+[docs]
def prepare(self, training):
    """
    Initialize the model using a training dataset.

    Parameters
    ----------
    training: physbo.variable
        Dataset for training.
    """
    self.model.prepare(training.X, training.t)
+[docs]
def get_post_fmean(self, training, test):
    """
    Calculate the posterior mean of the model.

    Parameters
    ----------
    training: physbo.variable
        Training dataset. If already trained, the model does not use this.
    test: physbo.variable
        Inputs.

    Returns
    -------
    numpy.ndarray
    """
    if self.model.stats is None:
        self.prepare(training)
    return self.model.get_post_fmean(training.X, test.X)
+
+
+
+[docs]
def get_post_fcov(self, training, test, diag=True):
    """
    Calculate the posterior variance-covariance of the model.

    Parameters
    ----------
    training: physbo.variable
        Training dataset. If already trained, the model does not use this.
    test: physbo.variable
        Inputs.
    diag: bool
        Diagonalization flag passed down to the model.

    Returns
    -------
    numpy.ndarray
    """
    if self.model.stats is None:
        self.prepare(training)
    return self.model.get_post_fcov(training.X, test.X, diag=diag)
+
+
+
+[docs]
def get_post_samples(self, training, test, alpha=1):
    """
    Draw samples of the mean values of the model.

    Parameters
    ----------
    training: physbo.variable
        Training dataset. If already trained, the model does not use this.
    test: physbo.variable
        Inputs.
    alpha: float
        Tuning parameter: the covariance is multiplied by alpha**2
        for np.random.multivariate_normal.

    Returns
    -------
    numpy.ndarray
    """
    if self.model.stats is None:
        self.prepare(training)
    return self.model.post_sampling(training.X, test.X, alpha=alpha)
+
+
+
+[docs]
def get_predict_samples(self, training, test, N=1):
    """
    Draw samples of predicted values of the model.

    Parameters
    ----------
    training: physbo.variable
        Training dataset. If already trained, the model does not use this.
    test: physbo.variable
        Inputs.
    N: int
        Number of samples (default: 1).

    Returns
    -------
    numpy.ndarray (N x len(test))
    """
    if self.model.stats is None:
        self.prepare(training)
    return self.model.predict_sampling(training.X, test.X, N=N)
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+
+
+[docs]
def centering(X):
    """
    Normalize each column of X to zero mean and unit standard deviation.
    Columns with zero standard deviation (constant features) are dropped.

    Parameters
    ----------
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.

    Returns
    -------
    X_normalized: numpy.ndarray
        Normalized N x d' dimensional matrix (d' <= d).
    """
    stdX = np.std(X, 0)
    index = np.where(stdX != 0)
    X_normalized = (X[:, index[0]] - np.mean(X[:, index[0]], 0)) / stdX[index[0]]
    return X_normalized
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+importscipy
+
+
+
+[docs]
def gauss_elim(U, t):
    """
    Calculate alpha using scipy.linalg.solve_triangular:
    alpha = (U^T U)^-1 t = U^-1 [(U^T)^-1 t]

    Parameters
    ----------
    U: (M, M) array_like
        An upper triangular matrix.
    t: (M,) or (M, N) array_like

    Returns
    -------
    alpha: numpy.ndarray
        Solution to the system (U^T U) alpha = t. Shape of return matches t.
    """
    # forward substitution with the lower-triangular transpose ...
    alpha = scipy.linalg.solve_triangular(
        U.transpose(), t, lower=True, overwrite_b=False, check_finite=False
    )
    # ... then back substitution with U itself
    alpha = scipy.linalg.solve_triangular(
        U, alpha, lower=False, overwrite_b=False, check_finite=False
    )
    return alpha
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+importconfigparser
+
+
+
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+
+
+[docs]
+classadam:
+"""
+ Optimizer of f(x) with the adam method
+
+ Attributes
+ ==========
+ params: numpy.ndarray
+ current input, x
+ nparams: int
+ dimension
+ grad: function
+ gradient function, g(x) = f'(x)
+ m: numpy.ndarray
+ v: numpy.ndarray
+ epoch: int
+ the number of update already done
+ max_epoch: int
+ the maximum number of update
+ alpha: float
+ beta: float
+ gamma: float
+ epsilon: float
+ """
+
def __init__(self, params, grad, options=None):
    """
    Adam optimizer state.

    Parameters
    ==========
    params: numpy.ndarray
        Initial input vector x.
    grad: function
        Gradient function, g(x) = f'(x).
    options: dict
        Hyperparameters for the adam method

        - "alpha" (default: 0.001)
        - "beta" (default: 0.9)
        - "gamma" (default: 0.9999)
        - "epsilon" (default: 1e-8)
        - "max_epoch" (default: 4000)
    """
    if options is None:
        # avoid the shared mutable-default-argument pitfall of ``options={}``
        options = {}
    self.grad = grad
    self.params = params
    self.nparams = params.shape[0]
    self._set_options(options)
    self.m = np.zeros(self.nparams)
    self.v = np.zeros(self.nparams)
    self.epoch = 0
+
+
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importpickleaspickle
+importnumpyasnp
+fromphysboimportgp
+
+
+
+[docs]
class base_predictor(object):
    """
    Base predictor is defined in this class.
    Concrete predictors must override the NotImplementedError stubs.
    """

    def __init__(self, config, model=None):
        """
        Parameters
        ----------
        config: set_config object (physbo.misc.set_config)
        model: model object
            A default model is set as gp.core.model.
        """
        self.config = config
        self.model = model
        if self.model is None:
            self.model = gp.core.model(
                cov=gp.cov.gauss(num_dim=None, ard=False),
                mean=gp.mean.const(),
                lik=gp.lik.gauss(),
            )

    def fit(self, *args, **kwds):
        """Default fit function. Must be overwritten in each model."""
        raise NotImplementedError

    def prepare(self, *args, **kwds):
        """Default prepare function. Must be overwritten in each model."""
        raise NotImplementedError

    def delete_stats(self, *args, **kwds):
        """Default function to delete status. Must be overwritten in each model."""
        raise NotImplementedError

    def get_basis(self, *args, **kwds):
        """Default function to get basis. Must be overwritten in each model."""
        raise NotImplementedError

    def get_post_fmean(self, *args, **kwds):
        """Default function to get a mean value of the score. Must be overwritten in each model."""
        raise NotImplementedError

    def get_post_fcov(self, *args, **kwds):
        """Default function to get a covariance of the score. Must be overwritten in each model."""
        raise NotImplementedError

    def get_post_params(self, *args, **kwds):
        """Default function to get parameters. Must be overwritten in each model."""
        raise NotImplementedError

    def get_post_samples(self, *args, **kwds):
        """Default function to get samples. Must be overwritten in each model."""
        raise NotImplementedError

    def get_predict_samples(self, *args, **kwds):
        """Default function to get prediction variables of samples. Must be overwritten in each model."""
        raise NotImplementedError

    def get_post_params_samples(self, *args, **kwds):
        """Default function to get parameters of samples. Must be overwritten in each model."""
        raise NotImplementedError

    def update(self, *args, **kwds):
        """Default function to update variables. Must be overwritten in each model."""
        raise NotImplementedError

    def save(self, file_name):
        """
        Save self.__dict__ with pickle (protocol 4).

        Parameters
        ----------
        file_name: str
            A file name to save self.__dict__ object.
        """
        with open(file_name, "wb") as f:
            pickle.dump(self.__dict__, f, 4)

    def load(self, file_name):
        """
        Load variables saved by ``save``.

        Parameters
        ----------
        file_name: str
            A file name to load variables from the file.
        """
        with open(file_name, "rb") as f:
            tmp_dict = pickle.load(f)
            self.config = tmp_dict["config"]
            self.model = tmp_dict["model"]
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+importcopy
+importpickleaspickle
+importitertools
+importtime
+
+from.resultsimporthistory
+from..importutility
+from..importscoreassearch_score
+from...gpimportpredictorasgp_predictor
+from...blmimportpredictorasblm_predictor
+from...miscimportset_config
+
+fromphysbo.variableimportvariable
+
+
+
+[docs]
+classpolicy:
def __init__(self, test_X, config=None, initial_data=None, comm=None):
    """
    Search policy over a discrete candidate set.

    Parameters
    ----------
    test_X: numpy.ndarray or physbo.variable
        The set of candidates. Each row vector represents the feature vector of each search candidate.
    config: set_config object (physbo.misc.set_config)
    initial_data: tuple[np.ndarray, np.ndarray]
        The initial training datasets.
        The first element is the array of actions and the second is the array of values of objective functions.
    comm: MPI.Comm, optional
        MPI Communicator.
    """
    self.predictor = None
    self.training = variable()
    self.new_data = None
    self.test = self._make_variable_X(test_X)
    self.actions = np.arange(0, self.test.X.shape[0])
    self.history = history()
    if config is None:
        self.config = set_config()
    else:
        self.config = config

    if initial_data is not None:
        if len(initial_data) != 2:
            msg = "ERROR: initial_data should be 2-elements tuple or list (actions and objectives)"
            raise RuntimeError(msg)
        actions, fs = initial_data
        if len(actions) != len(fs):
            msg = "ERROR: len(initial_data[0]) != len(initial_data[1])"
            raise RuntimeError(msg)
        self.write(actions, fs)
        # the initially evaluated actions are no longer candidates
        self.actions = np.array(sorted(list(set(self.actions) - set(actions))))

    if comm is None:
        self.mpicomm = None
        self.mpisize = 1
        self.mpirank = 0
    else:
        self.mpicomm = comm
        self.mpisize = comm.size
        self.mpirank = comm.rank
        # each rank owns a disjoint slice of the candidate actions
        self.actions = np.array_split(self.actions, self.mpisize)[self.mpirank]
        # only rank 0 prints learning progress
        self.config.learning.is_disp = (
            self.config.learning.is_disp and self.mpirank == 0
        )
+
+
+[docs]
def set_seed(self, seed):
    """
    Set the seed of np.random.

    Parameters
    ----------
    seed: int
        Seed number.
    """
    self.seed = seed
    np.random.seed(self.seed)
+
+
+
+[docs]
def write(
    self,
    action,
    t,
    X=None,
    time_total=None,
    time_update_predictor=None,
    time_get_action=None,
    time_run_simulator=None,
):
    """
    Write history (update history in memory, not output to a file).

    Parameters
    ----------
    action: numpy.ndarray
        Indexes of actions.
    t: numpy.ndarray
        N dimensional array. The negative energy of each search candidate (value of the objective function to be optimized).
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of each search candidate.
    time_total: numpy.ndarray
        N dimensional array. The total elapsed time in each step.
        If None (default), filled by 0.0.
    time_update_predictor: numpy.ndarray
        N dimensional array. The elapsed time for updating the predictor (e.g., learning hyperparameters) in each step.
        If None (default), filled by 0.0.
    time_get_action: numpy.ndarray
        N dimensional array. The elapsed time for getting the next action in each step.
        If None (default), filled by 0.0.
    time_run_simulator: numpy.ndarray
        N dimensional array. The elapsed time for running the simulator in each step.
        If None (default), filled by 0.0.
    """
    if X is None:
        X = self.test.X[action, :]
        Z = self.test.Z[action, :] if self.test.Z is not None else None
    else:
        Z = self.predictor.get_basis(X) if self.predictor is not None else None

    self.history.write(
        t,
        action,
        time_total=time_total,
        time_update_predictor=time_update_predictor,
        time_get_action=time_get_action,
        time_run_simulator=time_run_simulator,
    )
    self.training.add(X=X, t=t, Z=Z)

    # remove the selected actions from the list of candidates if they exist
    if len(self.actions) > 0:
        local_index = np.searchsorted(self.actions, action)
        local_index = local_index[
            np.take(self.actions, local_index, mode="clip") == action
        ]
        self.actions = self._delete_actions(local_index)

    if self.new_data is None:
        self.new_data = variable(X=X, t=t, Z=Z)
    else:
        self.new_data.add(X=X, t=t, Z=Z)
+
+
+
+[docs]
    def random_search(
        self, max_num_probes, num_search_each_probe=1, simulator=None, is_disp=True
    ):
        """
        Performing random search.

        Parameters
        ----------
        max_num_probes: int
            Maximum number of random search process.
        num_search_each_probe: int
            Number of search at each random search process.
        simulator: callable
            Callable (function or object with ``__call__``) from action to t.
            Here, action is an integer which represents the index of the candidate.
            If None, the chosen actions are returned instead of being evaluated.
        is_disp: bool
            If true, process messages are outputted.
        Returns
        -------
        history: history object (physbo.search.discrete.results.history)
        """

        # Only rank 0 prints progress messages.
        if self.mpirank != 0:
            is_disp = False

        N = int(num_search_each_probe)

        if is_disp:
            utility.show_interactive_mode(simulator, self.history)

        for n in range(0, max_num_probes):
            time_total = time.time()
            if is_disp and N > 1:
                utility.show_start_message_multi_search(self.history.num_runs)

            time_get_action = time.time()
            action = self._get_random_action(N)
            time_get_action = time.time() - time_get_action

            # Fewer than N actions may remain near the end of the search.
            N_indeed = len(action)
            if N_indeed == 0:
                if self.mpirank == 0:
                    print("WARNING: All actions have already searched.")
                return copy.deepcopy(self.history)

            # Without a simulator there is nothing to evaluate:
            # hand the chosen actions back to the caller (interactive mode).
            if simulator is None:
                return action

            time_run_simulator = time.time()
            t = _run_simulator(simulator, action, self.mpicomm)
            time_run_simulator = time.time() - time_run_simulator

            time_total = time.time() - time_total
            self.write(
                action,
                t,
                time_total=[time_total] * N_indeed,
                time_update_predictor=np.zeros(N_indeed, dtype=float),
                time_get_action=[time_get_action] * N_indeed,
                time_run_simulator=[time_run_simulator] * N_indeed,
            )

            if is_disp:
                utility.show_search_results(self.history, N_indeed)

        return copy.deepcopy(self.history)
+
+
+
+[docs]
    def bayes_search(
        self,
        training=None,
        max_num_probes=None,
        num_search_each_probe=1,
        predictor=None,
        is_disp=True,
        simulator=None,
        score="TS",
        interval=0,
        num_rand_basis=0,
    ):
        """
        Performing Bayesian optimization.

        Parameters
        ----------
        training: physbo.variable
            Training dataset.
        max_num_probes: int
            Maximum number of searching process by Bayesian optimization.
            If None, one probe is performed without calling the simulator.
        num_search_each_probe: int
            Number of searching by Bayesian optimization at each process.
        predictor: predictor object
            Base class is defined in physbo.predictor.
            If None, a predictor is created: BLM if num_rand_basis != 0, GP otherwise.
        is_disp: bool
            If true, process messages are outputted.
        simulator: callable
            Callable (function or object with ``__call__``).
            Here, action is an integer which represents the index of the candidate.
        score: str
            The type of acquisition function.
            TS (Thompson Sampling), EI (Expected Improvement) and PI (Probability of Improvement) are available.
        interval: int
            The interval number of learning the hyper parameter.
            If you set the negative value to interval, the hyper parameter learning is not performed.
            If you set zero to interval, the hyper parameter learning is performed only at the first step.
        num_rand_basis: int
            The number of basis function. If you choose 0, ordinary Gaussian process run.

        Returns
        -------
        history: history object (physbo.search.discrete.results.history)
        """

        # Only rank 0 prints progress messages.
        if self.mpirank != 0:
            is_disp = False

        # Temporarily propagate the display flag into the learning config;
        # restored before every return.
        old_disp = self.config.learning.is_disp
        self.config.learning.is_disp = is_disp

        # max_num_probes=None: propose once without evaluating via a simulator.
        if max_num_probes is None:
            max_num_probes = 1
            simulator = None

        is_rand_expans = num_rand_basis != 0

        if training is not None:
            self.training = training

        if predictor is not None:
            self.predictor = predictor
        elif self.predictor is None:
            self._init_predictor(is_rand_expans)

        # With zero probes, still learn hyperparameters unless interval < 0.
        if max_num_probes == 0 and interval >= 0:
            self._learn_hyperparameter(num_rand_basis)

        N = int(num_search_each_probe)

        for n in range(max_num_probes):
            time_total = time.time()

            time_update_predictor = time.time()
            # Either re-learn hyperparameters (per `interval`) or just fold in
            # the pending observations.
            if utility.is_learning(n, interval):
                self._learn_hyperparameter(num_rand_basis)
            else:
                self._update_predictor()
            time_update_predictor = time.time() - time_update_predictor

            if num_search_each_probe != 1:
                utility.show_start_message_multi_search(self.history.num_runs, score)

            time_get_action = time.time()
            K = self.config.search.multi_probe_num_sampling
            alpha = self.config.search.alpha
            action = self._get_actions(score, N, K, alpha)
            time_get_action = time.time() - time_get_action

            N_indeed = len(action)
            if N_indeed == 0:
                if self.mpirank == 0:
                    print("WARNING: All actions have already searched.")
                break

            # Without a simulator, return the proposed actions (interactive mode).
            if simulator is None:
                self.config.learning.is_disp = old_disp
                return action

            time_run_simulator = time.time()
            t = _run_simulator(simulator, action, self.mpicomm)
            time_run_simulator = time.time() - time_run_simulator

            time_total = time.time() - time_total
            self.write(
                action,
                t,
                time_total=[time_total] * N_indeed,
                time_update_predictor=[time_update_predictor] * N_indeed,
                time_get_action=[time_get_action] * N_indeed,
                time_run_simulator=[time_run_simulator] * N_indeed,
            )

            if is_disp:
                utility.show_search_results(self.history, N_indeed)
        self._update_predictor()
        self.config.learning.is_disp = old_disp
        return copy.deepcopy(self.history)
+
+
+ @staticmethod
+ def_warn_no_predictor(method_name):
+ print("Warning: Since policy.predictor is not yet set,")
+ print(" a GP predictor (num_rand_basis=0) is used for predicting")
+ print(" If you want to use a BLM predictor (num_rand_basis>0),")
+ print(" call bayes_search(max_num_probes=0, num_rand_basis=nrb)")
+ print(" before calling {}.".format(method_name))
+
+
+[docs]
    def get_post_fmean(self, xs):
        """Calculate mean value of predictor (post distribution)

        Parameters
        ----------
        xs: physbo.variable or numpy.ndarray
            Input candidates (rows are feature vectors).

        Returns
        -------
        numpy.ndarray
            Posterior mean evaluated at ``xs``.
        """
        X = self._make_variable_X(xs)
        if self.predictor is None:
            # No predictor yet: train a throwaway GP (num_rand_basis=0) on the
            # current training data instead of mutating self.predictor.
            self._warn_no_predictor("get_post_fmean()")
            predictor = gp_predictor(self.config)
            predictor.fit(self.training, 0)
            predictor.prepare(self.training)
            return predictor.get_post_fmean(self.training, X)
        else:
            # Fold any pending observations into the predictor first.
            self._update_predictor()
            return self.predictor.get_post_fmean(self.training, X)
+[docs]
    def get_score(
        self,
        mode,
        *,
        actions=None,
        xs=None,
        predictor=None,
        training=None,
        parallel=True,
        alpha=1
    ):
        """
        Calculate score (acquisition function)

        Parameters
        ----------
        mode: str
            The type of acquisition function. TS, EI and PI are available.
            These functions are defined in score.py.
        actions: array of int
            actions to calculate score
        xs: physbo.variable or np.ndarray
            input parameters to calculate score
        predictor: predictor object
            predictor used to calculate score.
            If not given, self.predictor will be used.
        training: physbo.variable
            Training dataset.
            If not given, self.training will be used.
        parallel: bool
            Calculate scores in parallel by MPI (default: True)
        alpha: float
            Tuning parameter which is used if mode = TS.
            In TS, multi variation is tuned as np.random.multivariate_normal(mean, cov*alpha**2, size).

        Returns
        -------
        f: float or list of float
            Score defined in each mode.

        Raises
        ------
        RuntimeError
            If both *actions* and *xs* are given

        Notes
        -----
        When neither *actions* nor *xs* are given, scores for actions not yet searched will be calculated.

        When *parallel* is True, it is assumed that the function receives the same input (*actions* or *xs*) for all the ranks.
        If you want to split the input array itself, set *parallel* be False and merge results by yourself.
        """
        if training is None:
            training = self.training

        if training.X is None or training.X.shape[0] == 0:
            msg = "ERROR: No training data is registered."
            raise RuntimeError(msg)

        if predictor is None:
            if self.predictor is None:
                # Fall back to a freshly trained GP without touching self.predictor.
                self._warn_no_predictor("get_score()")
                predictor = gp_predictor(self.config)
                predictor.fit(training, 0)
                predictor.prepare(training)
            else:
                self._update_predictor()
                predictor = self.predictor

        if xs is not None:
            if actions is not None:
                raise RuntimeError("ERROR: both actions and xs are given")
            test = self._make_variable_X(xs)
            if parallel and self.mpisize > 1:
                # Split the rows of xs evenly over MPI ranks; each rank scores
                # only its own slice.
                actions = np.array_split(np.arange(test.X.shape[0]), self.mpisize)
                test = test.get_subset(actions[self.mpirank])
        else:
            if actions is None:
                # Default: score this rank's not-yet-searched candidates.
                actions = self.actions
            else:
                if isinstance(actions, int):
                    actions = [actions]
                if parallel and self.mpisize > 1:
                    actions = np.array_split(actions, self.mpisize)[self.mpirank]
            test = self.test.get_subset(actions)

        f = search_score.score(
            mode, predictor=predictor, training=training, test=test, alpha=alpha
        )
        if parallel and self.mpisize > 1:
            # Gather per-rank score slices and restore the original ordering.
            fs = self.mpicomm.allgather(f)
            f = np.hstack(fs)
        return f
+
+
    def _get_marginal_score(self, mode, chosen_actions, K, alpha):
        """
        Getting marginal scores.

        Parameters
        ----------
        mode: str
            The type of acquisition function.
            TS (Thompson Sampling), EI (Expected Improvement) and PI (Probability of Improvement) are available.
            These functions are defined in score.py.
        chosen_actions: numpy.ndarray
            Array of selected actions.
        K: int
            The number of samples for evaluating score.
        alpha: float
            not used.

        Returns
        -------
        f: list
            N dimensional scores (score is defined in each mode)
        """
        f = np.zeros((K, len(self.actions)), dtype=float)

        # draw K samples of the values of objective function of chosen actions
        new_test_local = self.test.get_subset(chosen_actions)
        virtual_t_local = self.predictor.get_predict_samples(
            self.training, new_test_local, K
        )
        if self.mpisize == 1:
            new_test = new_test_local
            virtual_t = virtual_t_local
        else:
            # Gather per-rank candidates and samples so that every rank
            # evaluates the same set of virtual observations.
            new_test = variable()
            for nt in self.mpicomm.allgather(new_test_local):
                new_test.add(X=nt.X, t=nt.t, Z=nt.Z)
            virtual_t = np.concatenate(self.mpicomm.allgather(virtual_t_local), axis=1)
        # virtual_t = self.predictor.get_predict_samples(self.training, new_test, K)

        for k in range(K):
            # Work on deep copies so the real predictor/training set stay untouched.
            predictor = copy.deepcopy(self.predictor)
            train = copy.deepcopy(self.training)
            virtual_train = new_test
            virtual_train.t = virtual_t[k, :]

            if virtual_train.Z is None:
                train.add(virtual_train.X, virtual_train.t)
            else:
                train.add(virtual_train.X, virtual_train.t, virtual_train.Z)

            predictor.update(train, virtual_train)

            # Score the remaining candidates under the k-th virtual outcome.
            f[k, :] = self.get_score(
                mode, predictor=predictor, training=train, parallel=False
            )
        return np.mean(f, axis=0)
+
    def _get_actions(self, mode, N, K, alpha):
        """
        Getting next candidates

        Parameters
        ----------
        mode: str
            The type of acquisition function.
            TS (Thompson Sampling), EI (Expected Improvement) and PI (Probability of Improvement) are available.
            These functions are defined in score.py.
        N: int
            The total number of actions to return.
        K: int
            The total number of samples to evaluate marginal score
        alpha: float
            Tuning parameter which is used if mode = TS.
            In TS, multi variation is tuned as np.random.multivariate_normal(mean, cov*alpha**2, size).

        Returns
        -------
        chosen_actions: numpy.ndarray
            An N-dimensional array of actions selected in each search process.
        """
        # Score all remaining candidates and pick the global argmax first.
        f = self.get_score(
            mode,
            predictor=self.predictor,
            training=self.training,
            alpha=alpha,
            parallel=False,
        )
        champion, local_champion, local_index = self._find_champion(f)
        if champion == -1:
            # No candidates remain on any rank.
            return np.zeros(0, dtype=int)
        if champion == local_champion:
            # The winner lives on this rank: drop it from the local pool.
            self.actions = self._delete_actions(local_index)

        chosen_actions = [champion]
        for n in range(1, N):
            # Subsequent picks use the marginal score conditioned on the
            # actions already chosen in this probe.
            f = self._get_marginal_score(mode, chosen_actions[0:n], K, alpha)
            champion, local_champion, local_index = self._find_champion(f)
            if champion == -1:
                break
            if champion == local_champion:
                self.actions = self._delete_actions(local_index)
            chosen_actions.append(champion)
        return np.array(chosen_actions)
+
    def _find_champion(self, f):
        """Find the action with the best score across all MPI ranks.

        Parameters
        ----------
        f: numpy.ndarray
            Scores of this rank's remaining candidate actions (same order as self.actions).

        Returns
        -------
        champion: int
            Globally best action (-1 when every rank is out of candidates).
        local_champion: int
            Best action on this rank (-1 if this rank has none).
        local_index: int
            Index of local_champion within self.actions (-1 if none).
        """
        if len(f) == 0:
            # This rank has no candidates left; -inf loses every comparison below.
            local_fmax = -float("inf")
            local_index = -1
            local_champion = -1
        else:
            local_fmax = np.max(f)
            local_index = np.argmax(f)
            local_champion = self.actions[local_index]
        if self.mpisize == 1:
            champion = local_champion
        else:
            # Compare the per-rank winners by their best scores.
            local_champions = self.mpicomm.allgather(local_champion)
            local_fs = self.mpicomm.allgather(local_fmax)
            champion_rank = np.argmax(local_fs)
            champion = local_champions[champion_rank]
        return champion, local_champion, local_index
+
    def _get_random_action(self, N):
        """
        Getting indexes of actions randomly.

        Parameters
        ----------
        N: int
            Total number of search candidates.
        Returns
        -------
        action: numpy.ndarray
            Indexes of actions selected randomly from search candidates.
        """
        if self.mpisize == 1:
            n = len(self.actions)
            if n <= N:
                # Fewer candidates than requested: take them all.
                index = np.arange(0, n)
            else:
                index = np.random.choice(len(self.actions), N, replace=False)
            action = self.actions[index]
            self.actions = self._delete_actions(index)
        else:
            # Rank 0 draws a global sample over the concatenated per-rank
            # candidate lists and scatters each rank its own local indices.
            nactions = self.mpicomm.gather(len(self.actions), root=0)
            local_indices = [[] for _ in range(self.mpisize)]
            if self.mpirank == 0:
                # hi/lo are the exclusive/inclusive global offsets of each rank's slice.
                hi = np.add.accumulate(nactions)
                lo = np.roll(hi, 1)
                lo[0] = 0
                if hi[-1] <= N:
                    index = np.arange(0, hi[-1])
                else:
                    index = np.random.choice(hi[-1], N, replace=False)
                # Map each global index to (owning rank, local offset).
                ranks = np.searchsorted(hi, index, side="right")
                for r, i in zip(ranks, index):
                    local_indices[r].append(i - lo[r])
            local_indices = self.mpicomm.scatter(local_indices, root=0)
            local_actions = self.actions[local_indices]
            self.actions = self._delete_actions(local_indices)
            # Every rank ends up with the full list of chosen actions.
            action = self.mpicomm.allgather(local_actions)
            action = itertools.chain.from_iterable(action)
            action = np.array(list(action))
        return action
+
+
+[docs]
    def save(self, file_history, file_training=None, file_predictor=None):
        """

        Saving history, training and predictor into the corresponding files.

        Parameters
        ----------
        file_history: str
            The name of the file that stores the information of the history.
        file_training: str
            The name of the file that stores the training dataset.
        file_predictor: str
            The name of the file that stores the predictor dataset.

        Returns
        -------

        """
        # Only rank 0 writes to disk to avoid concurrent writes under MPI.
        if self.mpirank == 0:
            self.history.save(file_history)

            if file_training is not None:
                self.training.save(file_training)

            if file_predictor is not None:
                # The predictor object is serialized with pickle.
                with open(file_predictor, "wb") as f:
                    pickle.dump(self.predictor, f)
+
+
+
+[docs]
    def load(self, file_history, file_training=None, file_predictor=None):
        """

        Loading files about history, training and predictor.

        Parameters
        ----------
        file_history: str
            The name of the file that stores the information of the history.
        file_training: str
            The name of the file that stores the training dataset.
        file_predictor: str
            The name of the file that stores the predictor dataset.

        Returns
        -------

        """
        self.history.load(file_history)

        if file_training is None:
            # Rebuild the training set from the loaded history and the
            # registered candidate features.
            N = self.history.total_num_search
            X = self.test.X[self.history.chosen_actions[0:N], :]
            t = self.history.fx[0:N]
            self.training = variable(X=X, t=t)
        else:
            self.training = variable()
            self.training.load(file_training)

        if file_predictor is not None:
            # NOTE: pickle.load executes code during deserialization;
            # only load predictor files from trusted sources.
            with open(file_predictor, "rb") as f:
                self.predictor = pickle.load(f)

        N = self.history.total_num_search

        # Drop already-visited actions from the candidate list
        # (searchsorted works because self.actions is kept sorted).
        visited = self.history.chosen_actions[:N]
        local_index = np.searchsorted(self.actions, visited)
        local_index = local_index[
            np.take(self.actions, local_index, mode="clip") == visited
        ]
        self.actions = self._delete_actions(local_index)
+[docs]
+ defexport_training(self):
+"""
+ Returning the training dataset
+
+ Returns
+ -------
+
+ """
+ returnself.training
+
+
+
+[docs]
+ defexport_history(self):
+"""
+ Returning the information of the history.
+
+ Returns
+ -------
+
+ """
+ returnself.history
+
+
    def _init_predictor(self, is_rand_expans):
        """
        Initialize predictor.

        Parameters
        ----------
        is_rand_expans: bool
            If true, physbo.blm.predictor is selected.
            If false, physbo.gp.predictor is selected.
        """
        if is_rand_expans:
            # Random feature expansion -> Bayesian linear model predictor.
            self.predictor = blm_predictor(self.config)
        else:
            # Ordinary Gaussian-process predictor.
            self.predictor = gp_predictor(self.config)
+
    def _learn_hyperparameter(self, num_rand_basis):
        """Re-fit the predictor's hyperparameters and refresh cached bases.

        Parameters
        ----------
        num_rand_basis: int
            The number of random basis functions (0 means ordinary GP).
        """
        self.predictor.fit(self.training, num_rand_basis)
        # Basis vectors depend on the new hyperparameters; recompute caches.
        self.test.Z = self.predictor.get_basis(self.test.X)
        self.training.Z = self.predictor.get_basis(self.training.X)
        self.predictor.prepare(self.training)
        # All observations are now incorporated in the fit; nothing pending.
        self.new_data = None
+
+ def_update_predictor(self):
+ ifself.new_dataisnotNone:
+ self.predictor.update(self.training,self.new_data)
+ self.new_data=None
+
+ def_make_variable_X(self,test_X):
+"""
+ Make a new *variable* with X=test_X
+
+ Parameters
+ ----------
+ test_X: numpy.ndarray or physbo.variable
+ The set of candidates. Each row vector represents the feature vector of each search candidate.
+ Returns
+ -------
+ test_X: numpy.ndarray or physbo.variable
+ The set of candidates. Each row vector represents the feature vector of each search candidate.
+ """
+ ifisinstance(test_X,np.ndarray):
+ test=variable(X=test_X)
+ elifisinstance(test_X,variable):
+ test=test_X
+ else:
+ raiseTypeError("The type of test_X must be ndarray or physbo.variable")
+ returntest
+
+ def_delete_actions(self,index,actions=None):
+"""
+ Returns remaining actions
+
+ Notes
+ -----
+ This method itself does not modify *self*
+
+ Parameters
+ ----------
+ index: int
+ Index of an action to be deleted.
+ actions: numpy.ndarray
+ Array of actions.
+ Returns
+ -------
+ actions: numpy.ndarray
+ Array of actions which does not include action specified by index.
+ """
+ ifactionsisNone:
+ actions=self.actions
+ returnnp.delete(actions,index)
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+importcopy
+importpickle
+
+from..importutility
+
+MAX_SEARCH=int(30000)
+
+
+
+[docs]
    def write(
        self,
        t,
        action,
        time_total=None,
        time_update_predictor=None,
        time_get_action=None,
        time_run_simulator=None,
    ):
        """
        Overwrite fx and chosen_actions by t and action.

        Parameters
        ----------
        t: numpy.ndarray
            N dimensional array. The negative energy of each search candidate (value of the objective function to be optimized).
        action: numpy.ndarray
            N dimensional array. The indexes of actions of each search candidate.
        time_total: numpy.ndarray
            N dimensional array. The total elapsed time in each step.
            If None (default), filled by 0.0.
        time_update_predictor: numpy.ndarray
            N dimensional array. The elapsed time for updating predictor (e.g., learning hyperparameters) in each step.
            If None (default), filled by 0.0.
        time_get_action: numpy.ndarray
            N dimensional array. The elapsed time for getting next action in each step.
            If None (default), filled by 0.0.
        time_run_simulator: numpy.ndarray
            N dimensional array. The elapsed time for running the simulator in each step.
            If None (default), filled by 0.0.
        Returns
        -------

        """
        N = utility.length_vector(t)
        # New entries occupy the half-open slot range [st, en) of the
        # preallocated per-search arrays.
        st = self.total_num_search
        en = st + N

        # Record where this run ends, then append the observations.
        self.terminal_num_run[self.num_runs] = en
        self.fx[st:en] = t
        self.chosen_actions[st:en] = action
        self.num_runs += 1
        self.total_num_search += N

        # Missing timing info defaults to zero for the N new entries.
        if time_total is None:
            time_total = np.zeros(N, dtype=float)
        self.time_total_[st:en] = time_total

        if time_update_predictor is None:
            time_update_predictor = np.zeros(N, dtype=float)
        self.time_update_predictor_[st:en] = time_update_predictor

        if time_get_action is None:
            time_get_action = np.zeros(N, dtype=float)
        self.time_get_action_[st:en] = time_get_action

        if time_run_simulator is None:
            time_run_simulator = np.zeros(N, dtype=float)
        self.time_run_simulator_[st:en] = time_run_simulator
+
+
+
+[docs]
+ defexport_sequence_best_fx(self):
+"""
+ Export fx and actions at each sequence.
+ (The total number of data is num_runs.)
+
+ Returns
+ -------
+ best_fx: numpy.ndarray
+ best_actions: numpy.ndarray
+ """
+ best_fx=np.zeros(self.num_runs,dtype=float)
+ best_actions=np.zeros(self.num_runs,dtype=int)
+ forninrange(self.num_runs):
+ index=np.argmax(self.fx[0:self.terminal_num_run[n]])
+ best_actions[n]=self.chosen_actions[index]
+ best_fx[n]=self.fx[index]
+
+ returnbest_fx,best_actions
+
+
+
+[docs]
+ defexport_all_sequence_best_fx(self):
+"""
+ Export all fx and actions at each sequence.
+ (The total number of data is total_num_research.)
+
+ Returns
+ -------
+ best_fx: numpy.ndarray
+ best_actions: numpy.ndarray
+ """
+ best_fx=np.zeros(self.total_num_search,dtype=float)
+ best_actions=np.zeros(self.total_num_search,dtype=int)
+ best_fx[0]=self.fx[0]
+ best_actions[0]=self.chosen_actions[0]
+
+ forninrange(1,self.total_num_search):
+ ifbest_fx[n-1]<self.fx[n]:
+ best_fx[n]=self.fx[n]
+ best_actions[n]=self.chosen_actions[n]
+ else:
+ best_fx[n]=best_fx[n-1]
+ best_actions[n]=best_actions[n-1]
+
+ returnbest_fx,best_actions
+
+
+
+[docs]
+ defsave(self,filename):
+"""
+ Save the information of the history.
+
+ Parameters
+ ----------
+ filename: str
+ The name of the file which stores the information of the history
+ Returns
+ -------
+
+ """
+ N=self.total_num_search
+ M=self.num_runs
+ np.savez_compressed(
+ filename,
+ num_runs=M,
+ total_num_search=N,
+ fx=self.fx[0:N],
+ chosen_actions=self.chosen_actions[0:N],
+ terminal_num_run=self.terminal_num_run[0:M],
+ )
+
+
+
+[docs]
+ defload(self,filename):
+"""
+ Load the information of the history.
+
+ Parameters
+ ----------
+ filename: str
+ The name of the file which stores the information of the history
+ Returns
+ -------
+
+ """
+ data=np.load(filename)
+ M=int(data["num_runs"])
+ N=int(data["total_num_search"])
+ self.num_runs=M
+ self.total_num_search=N
+ self.fx[0:N]=data["fx"]
+ self.chosen_actions[0:N]=data["chosen_actions"]
+ self.terminal_num_run[0:M]=data["terminal_num_run"]
Source code for physbo.search.discrete_multi.policy
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+importcopy
+importpickleaspickle
+importtime
+
+from.resultsimporthistory
+from..importdiscrete
+from..importutility
+from..importscore_multiassearch_score
+from...gpimportpredictorasgp_predictor
+from...blmimportpredictorasblm_predictor
+from...miscimportset_config
+from...variableimportvariable
+
+fromtypingimportList,Optional
+
+
+
Source code for physbo.search.discrete_multi.results
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+importpickle
+importcopy
+
+from..importpareto
+
+MAX_SEARCH=int(30000)
+
+
+
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+
+
+[docs]
class Rectangles(object):
    """A growable set of axis-aligned hyper-rectangles stored as bound arrays."""

    def __init__(self, n_dim, dtype):
        """
        Initialize an empty set of hyper-rectangles.

        :param n_dim: dimension of rectangles
        """
        self.n_dim = n_dim
        # Zero rectangles to start with; lb/ub grow row-wise via add().
        self.lb = np.zeros((0, self.n_dim), dtype=dtype)
        self.ub = np.zeros((0, self.n_dim), dtype=dtype)

    def add(self, lb, ub):
        """
        Add new rectangles.

        :param lb: lower bounds of rectangles
        :param ub: upper bounds of rectangles
        """
        self.lb = np.r_[self.lb, lb]
        self.ub = np.r_[self.ub, ub]
+
+
+
+
+
+[docs]
def dominate(t1, t2):
    """domination rule for maximization problem"""
    # t1 dominates t2 iff it is no worse in every objective
    # and strictly better in at least one.
    no_worse_everywhere = np.all(t1 >= t2)
    strictly_better_somewhere = np.any(t1 > t2)
    return no_worse_everywhere and strictly_better_somewhere
+
+
    def __divide_2d(self):
        """
        Divide non-dominated region into vertical rectangles for the case of 2-objectives.

        Assumes that Pareto set has been sorted on the first objective in ascending order.

        Notes:
            In 2-dimensional cases, the second objective has been sorted in descending order.
        """
        # One cell per gap between adjacent front points, plus the two ends.
        n_cells = self.front.shape[0] + 1
        # Index pairs into the augmented front (0 = anti-ideal sentinel,
        # n_cells = ideal sentinel).
        # NOTE(review): the second coordinate of lb uses (i+1) % n_cells, so the
        # last cell's lower bound wraps to index 0 — presumably the -inf
        # sentinel; confirm against the augmented-front convention used in
        # __divide_using_binary_search.
        lb_idx = [[i, (i + 1) % n_cells] for i in range(n_cells)]
        ub_idx = [[i + 1, n_cells] for i in range(n_cells)]

        self.cells.add(lb_idx, ub_idx)
+
    def __included_in_non_dom_region(self, p):
        # p lies in the non-dominated region iff no front point dominates it,
        # i.e. every front point is <= p in at least one objective.
        return np.all([np.any(pf <= p) for pf in self.front])
+
    def __divide_using_binary_search(self):
        """Divide the non-dominated region into cells by recursive bisection.

        Candidate rectangles are index pairs into the augmented front; each is
        either accepted as a whole cell, split along its largest dimension, or
        discarded, until no candidates remain.
        """
        # Augment the front with -inf (anti-ideal) and +inf (ideal) sentinels.
        front = np.r_[
            np.full((1, self.num_objectives), -np.inf),
            self.front,
            np.full((1, self.num_objectives), np.inf),
        ]

        # Pareto front indices when sorted on each dimension's front value in ascending order.
        # (indices start from 1)
        # Index 0 means anti-ideal value, index `self.front.shape[0] + 1` means ideal point.
        front_idx = np.r_[
            np.zeros((1, self.num_objectives), dtype=int),
            np.argsort(self.front, axis=0) + 1,
            np.full((1, self.num_objectives), self.front.shape[0] + 1, dtype=int),
        ]

        # Start from the single rectangle spanning the whole augmented space.
        rect_candidates = [[np.copy(front_idx[0]), np.copy(front_idx[-1])]]

        while rect_candidates:
            rect = rect_candidates.pop()

            # Resolve index-space bounds to actual front values.
            lb_idx = [front_idx[rect[0][d], d] for d in range(self.num_objectives)]
            ub_idx = [front_idx[rect[1][d], d] for d in range(self.num_objectives)]
            lb = [front[lb_idx[d], d] for d in range(self.num_objectives)]
            ub = [front[ub_idx[d], d] for d in range(self.num_objectives)]

            if self.__included_in_non_dom_region(lb):
                # Entire rectangle is non-dominated: keep it as one cell.
                self.cells.add([lb_idx], [ub_idx])

            elif self.__included_in_non_dom_region(ub):
                # Rectangle straddles the front: bisect if still divisible.
                rect_sizes = rect[1] - rect[0]

                # divide rectangle by the dimension with largest size
                if np.any(rect_sizes > 1):
                    div_dim = np.argmax(rect_sizes)
                    div_point = rect[0][div_dim] + int(round(rect_sizes[div_dim] / 2.0))

                    # add divided left rectangle
                    left_ub_idx = np.copy(rect[1])
                    left_ub_idx[div_dim] = div_point
                    rect_candidates.append([np.copy(rect[0]), left_ub_idx])

                    # add divided right rectangle
                    right_lb_idx = np.copy(rect[0])
                    right_lb_idx[div_dim] = div_point
                    rect_candidates.append([right_lb_idx, np.copy(rect[1])])
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+importscipy.stats
+
+
+
+[docs]
def score(mode, predictor, test, training=None, **kwargs):
    """
    Calculate scores (acquisition function) for test data.

    Parameters
    ----------
    mode: str
        Kind of score.

        "EI", "PI", and "TS" are available.

    predictor: predictor object
        Base class is defined in physbo.predictor.

    training: physbo.variable
        Training dataset.
        If the predictor is not trained, use this for training.

    test: physbo.variable
        Inputs

    Other Parameters
    ----------------
    fmax: float
        Max value of mean of posterior probability distribution.
        If not set, the maximum value of posterior mean for training is used.
        Used only for mode == "EI" and "PI"

    alpha: float
        noise for sampling source (default: 1.0)
        Used only for mode == "TS"

    Returns
    -------
    score: numpy.ndarray

    Raises
    ------
    NotImplementedError
        If unknown mode is given
    """

    # Nothing to score.
    if test.X.shape[0] == 0:
        return np.zeros(0)

    if mode == "EI":
        return EI(predictor, training, test, kwargs.get("fmax", None))
    if mode == "PI":
        return PI(predictor, training, test, kwargs.get("fmax", None))
    if mode == "TS":
        return TS(predictor, training, test, kwargs.get("alpha", 1.0))
    raise NotImplementedError("ERROR: mode must be EI, PI or TS.")
+
+
+
+
+[docs]
def EI(predictor, training, test, fmax=None):
    """
    Maximum expected improvement.

    Parameters
    ----------
    predictor: predictor object
        Base class is defined in physbo.predictor.
    training: physbo.variable
        Training dataset.
        If the predictor is not trained, use this for training.
    test: physbo.variable
        Inputs
    fmax: float
        Max value of posterior probability distribution.
        If not set, the maximum value of posterior mean for training is used.
    Returns
    -------
    score: numpy.ndarray
    """
    mean = predictor.get_post_fmean(training, test)
    std = np.sqrt(predictor.get_post_fcov(training, test))

    # Default reference value: best posterior mean over the training points.
    if fmax is None:
        fmax = np.max(predictor.get_post_fmean(training, training))

    gap = mean - fmax
    z = gap / std
    # Closed-form EI for a Gaussian posterior.
    return gap * scipy.stats.norm.cdf(z) + std * scipy.stats.norm.pdf(z)
+
+
+
+
+[docs]
def PI(predictor, training, test, fmax=None):
    """
    Maximum probability of improvement.

    Parameters
    ----------
    predictor: predictor object
        Base class is defined in physbo.predictor.
    training: physbo.variable
        Training dataset.
        If the predictor is not trained, use this for training.
    test: physbo.variable
        Inputs
    fmax: float
        Max value of posterior probability distribution.
        If not set, the maximum value of posterior mean for training is used.
    Returns
    -------
    score: numpy.ndarray
    """
    mean = predictor.get_post_fmean(training, test)
    std = np.sqrt(predictor.get_post_fcov(training, test))

    # Default reference value: best posterior mean over the training points.
    if fmax is None:
        fmax = np.max(predictor.get_post_fmean(training, training))

    # Probability that the candidate exceeds fmax under the Gaussian posterior.
    return scipy.stats.norm.cdf((mean - fmax) / std)
+
+
+
+
+[docs]
def TS(predictor, training, test, alpha=1):
    """
    Thompson sampling (See Sec. 2.1 in Materials Discovery Volume 4, June 2016, Pages 18-21)

    Parameters
    ----------
    predictor: predictor object
        Base class is defined in physbo.predictor.
    training: physbo.variable
        Training dataset.
        If the predictor is not trained, use this for training.
    test: physbo.variable
        Inputs
    alpha: float
        noise for sampling source
        (default: 1.0)
    Returns
    -------
    score: numpy.ndarray
    """
    # One posterior sample per candidate, flattened to a 1-D score vector.
    samples = predictor.get_post_samples(training, test, alpha=alpha)
    return samples.flatten()
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+importscipy.stats
+
+from.paretoimportPareto
+
+
+
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+
+
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+
+
+[docs]
+classvariable(object):
    def __init__(self, X=None, t=None, Z=None):
        """
        A container bundling candidate features, objective values, and bases.

        Parameters
        ----------
        X: numpy array
            N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of each search candidate.
        t: numpy array
            N dimensional array. The negative energy of each search candidate (value of the objective function to be optimized).
        Z:
            Basis representation of X (presumably produced by a predictor's
            get_basis; may be None).

        """
        self.X = X  # feature vectors (N x d) or None
        self.Z = Z  # basis representation of X, or None
        self.t = t  # objective values (N,) or None
+
+
+[docs]
+ defget_subset(self,index):
+"""
+ Getting subset of variables.
+
+ Parameters
+ ----------
+ index: int or array of int
+ Index of selected action.
+ Returns
+ -------
+ variable: physbo.variable
+ """
+ temp_X=self.X[index,:]ifself.XisnotNoneelseNone
+ temp_t=self.t[index]ifself.tisnotNoneelseNone
+ temp_Z=self.Z[index,:]ifself.ZisnotNoneelseNone
+
+ returnvariable(X=temp_X,t=temp_t,Z=temp_Z)
+
+
+
+[docs]
+ defdelete(self,num_row):
+"""
+ Deleting variables of X, t, Z whose indexes are specified by num_row.
+
+ Parameters
+ ----------
+ num_row: numpy array
+ Index array to be deleted.
+
+ Returns
+ -------
+
+ """
+ self.delete_X(num_row)
+ self.delete_t(num_row)
+ self.delete_Z(num_row)
+
+
+
+[docs]
+ defadd(self,X=None,t=None,Z=None):
+"""
+ Adding variables of X, t, Z.
+
+ Parameters
+ ----------
+ X: numpy array
+ N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of each search candidate.
+ t: numpy array
+ N dimensional array. The negative energy of each search candidate (value of the objective function to be optimized).
+ Z
+
+ Returns
+ -------
+
+ """
+ self.add_X(X)
+ self.add_t(t)
+ self.add_Z(Z)
+
+
+
+[docs]
+ defdelete_X(self,num_row):
+"""
+ Deleting variables of X whose indexes are specified by num_row.
+
+
+ Parameters
+ ----------
+ num_row: numpy array
+ Index array to be deleted.
+
+ Returns
+ -------
+
+ """
+ ifself.XisnotNone:
+ self.X=np.delete(self.X,num_row,0)
+
+
+
+[docs]
+ defdelete_t(self,num_row):
+"""
+ Deleting variables of t whose indexes are specified by num_row.
+
+ Parameters
+ ----------
+ num_row: numpy array
+ Index array to be deleted.
+
+ Returns
+ -------
+
+ """
+ ifself.tisnotNone:
+ self.t=np.delete(self.t,num_row)
+
+
+
+[docs]
+ defdelete_Z(self,num_row):
+"""
+ Deleting variables of Z whose indexes are specified by num_row.
+
+ Parameters
+ ----------
+ num_row: numpy array
+ Index array to be deleted.
+
+ Returns
+ -------
+
+ """
+ ifself.ZisnotNone:
+ self.Z=np.delete(self.Z,num_row,0)
+
+
+
+[docs]
+ defadd_X(self,X=None):
+"""
+ Adding variable X. If self.X is None, self.X is set as X.
+
+ Parameters
+ ----------
+ X: numpy array
+ N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of each search candidate.
+
+ Returns
+ -------
+
+ """
+ ifXisnotNone:
+ ifself.XisnotNone:
+ self.X=np.vstack((self.X,X))
+ else:
+ self.X=X
+
+
+
+[docs]
+ defadd_t(self,t=None):
+"""
+ Adding variable t. If self.t is None, self.t is set as t.
+
+ Parameters
+ ----------
+ t: numpy array
+ N dimensional array. The negative energy of each search candidate (value of the objective function to be optimized).
+
+ Returns
+ -------
+
+ """
+ ifnotisinstance(t,np.ndarray):
+ t=np.array([t])
+
+ iftisnotNone:
+ ifself.tisnotNone:
+ self.t=np.hstack((self.t,t))
+ else:
+ self.t=t
+
+
+
+[docs]
+ defadd_Z(self,Z=None):
+"""
+ Adding variable Z. If self.Z is None, self.Z is set as Z.
+
+ Parameters
+ ----------
+ Z
+
+ Returns
+ -------
+
+ """
+ ifZisnotNone:
+ ifself.ZisNone:
+ self.Z=Z
+ else:
+ self.Z=np.vstack((self.Z,Z))
+
+
+
+[docs]
+ defsave(self,file_name):
+"""
+ Saving variables X, t, Z to the file.
+
+ Parameters
+ ----------
+ file_name: str
+ A file name for saving variables X, t, Z using numpy.savez_compressed.
+
+ Returns
+ -------
+
+ """
+ np.savez_compressed(file_name,X=self.X,t=self.t,Z=self.Z)
+
+
+
+[docs]
+ defload(self,file_name):
+"""
+ Loading variables X, t, Z from the file.
+
+ Parameters
+ ----------
+ file_name: str
+ A file name for loading variables X, t, Z using numpy.load.
+
+ Returns
+ -------
+
+ """
+ data=np.load(file_name,allow_pickle=True)
+ self.X=data["X"]
+ self.t=data["t"]
+ self.Z=data["Z"]
+
+
+
+
\ No newline at end of file
diff --git a/manual/v2.0.2/en/_sources/acknowledgement.rst.txt b/manual/v2.0.2/en/_sources/acknowledgement.rst.txt
new file mode 100644
index 00000000..7991efa7
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/acknowledgement.rst.txt
@@ -0,0 +1,4 @@
+***************************
+Acknowledgement
+***************************
+We would like to thank the support from “Project for advancement of software usability in materials science” by The Institute for Solid State Physics, The University of Tokyo, for development of PHYSBO.
diff --git a/manual/v2.0.2/en/_sources/algorithm.rst.txt b/manual/v2.0.2/en/_sources/algorithm.rst.txt
new file mode 100644
index 00000000..3f8c01d4
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/algorithm.rst.txt
@@ -0,0 +1,167 @@
+.. _chap_algorithm:
+
+Algorithm
+=====================
+This section describes an overview of Bayesian optimization. For technical details, please refer to `this reference `_ .
+
+Bayesian optimization
+---------------------
+Bayesian optimization is a method that can be used in complex simulations or real-world experimental tasks where the evaluation of the objective function (e.g., property values) is very costly. In other words, Bayesian optimization solves the problem of finding explanatory variables (material composition, structure, process and simulation parameters, etc.) that have a better objective function (material properties, etc.) with as few experiments and simulations as possible. In Bayesian optimization, we start from a situation where we have a list of candidates for the explanatory variables to be searched (represented by the vector :math:`{\bf x}`). Then, from among the candidates, the one that is expected to improve the objective function :math:`y` is selected by making good use of prediction by machine learning (using Gaussian process regression). We then evaluate the value of the objective function by performing experiments and simulations on the candidates. By repeating the process of selection by machine learning and evaluation by experimental simulation, optimization can be achieved in as few times as possible.
+
+The details of the Bayesian optimization algorithm are described below.
+
+- Step1: Initialization
+
+Prepare the space to be explored in advance. In other words, list up the composition, structure, process, simulation parameters, etc. of the candidate materials as a vector :math:`{\bf x}`. At this stage, the value of the objective function is not known. A few candidates are chosen as initial conditions and the value of the objective function :math:`y` is estimated by experiment or simulation. This gives us the training data :math:`D = \{ {\bf x}_i, y_i \}_{(i=1, \cdots, N)}` with the explanatory variables :math:`{\bf x}` and the objective function :math:`y`.
+
+- Step2: Selection of candidates
+
+Using the training data, learn a Gaussian process. For a Gaussian process, the mean :math:`\mu_c ({\bf x})` and the variance :math:`\sigma_c ({\bf x})` of the prediction at an arbitrary :math:`{\bf x}` are given as follows
+
+.. math::
+
+ \mu_c ({\bf x}) &= {\bf k}({\bf x})^T (K+\sigma^2 I)^{-1}{\bf y},
+
+ \sigma_c({\bf x}) &= k({\bf x}, {\bf x}) + \sigma^2 - {\bf k}({\bf x})^T (K+\sigma^2 I)^{-1}{\bf k}({\bf x}),
+
+where :math:`k({\bf x}, {\bf x}')` is a function called as a kernel, and it represents the similarity of two vectors. In general, the following Gaussian kernel is used:
+
+.. math::
+
+ k({\bf x}, {\bf x}') = \exp \left[ -\frac{1}{2\eta^2}||{\bf x} - {\bf x}'||^2 \right].
+
+Using this kernel function, :math:`{\bf k}({\bf x})` and :math:`K` are computed as follows
+
+.. math::
+
+ {\bf k}({\bf x}) = \left( k({\bf x}_1, {\bf x}), k({\bf x}_2, {\bf x}), \cdots, k({\bf x}_N, {\bf x}) \right)^\top
+
+.. math::
+ :nowrap:
+
+ \[
+ K = \left(
+ \begin{array}{cccc}
+ k({\bf x}_1, {\bf x}_1) & k({\bf x}_1, {\bf x}_2) & \ldots & k({\bf x}_1, {\bf x}_N) \\
+ k({\bf x}_2, {\bf x}_1) & k({\bf x}_2, {\bf x}_2) & \ldots & k({\bf x}_2, {\bf x}_N) \\
+ \vdots & \vdots & \ddots & \vdots \\
+ k({\bf x}_N, {\bf x}_1) & k({\bf x}_N, {\bf x}_2) & \ldots & k({\bf x}_N, {\bf x}_N)
+ \end{array}
+ \right)
+ \]
+
+For all candidates that have not yet been tested or simulated, the prediction :math:`\mu_c ({\bf x})` and the variance associated with the uncertainty of the prediction :math:`\sigma_c ({\bf x})` are estimated. Using this, the acquisition function is calculated. Then, the candidate :math:`{\bf x}^*` is selected that maximizes the acquisition function from among the candidates for which we do not yet know the value of the objective function. In this case, :math:`\sigma` and :math:`\eta` are called hyperparameters, and PHYSBO will automatically set the best value.
+
+As an acquisition function, for example, Maximum Probability of Improvement (PI) and Maximum Expected Improvement (EI) are useful.
+The score of PI is defined as follows.
+
+.. math::
+
+ \text{PI} (\mathbf{x}) = \Phi (z (\mathbf{x})), \ \ \ z(\mathbf{x}) = \frac{\mu_c (\mathbf{x}) - y_{\max}}{\sigma_c (\mathbf{x})},
+
+where :math:`\Phi(\cdot)` is the cumulative distribution function.
+The PI score represents the probability of exceeding the maximum :math:`y_{\max}` of the currently obtained :math:`y`.
+In addition, the EI score is the expected value of the difference between the predicted value and the current maximum :math:`y_{\max}` and is given by
+
+.. math::
+
+ \text{EI} (\mathbf{x}) = [\mu_c (\mathbf{x})-y_{\max}] \Phi (z (\mathbf{x})) + \sigma_c (\mathbf{x}) \phi (z (\mathbf{x})), \ \ \ z(\mathbf{x}) = \frac{\mu_c (\mathbf{x}) - y_{\max}}{\sigma_c (\mathbf{x})},
+
+where :math:`\phi(\cdot)` is a probability density function.
+
+
+- Step3: Experiment (Simulation)
+
+Perform an experiment or simulation on the candidate :math:`{\bf x}^*` with the largest acquisition function selected in step 2, and estimate the objective function value :math:`y`. This will add one more piece of training data. Repeat steps 2 and 3 to search for candidates with good scores.
+
+Accelerating Bayesian Optimization with PHYSBO
+-----------------------------------------------
+
+In PHYSBO, random feature map, Thompson sampling, and Cholesky decomposition are used to accelerate the calculation of Bayesian optimization.
+First, the random feature map is introduced.
+By introducing the random feature map :math:`\phi (\mathbf{x})`, we can approximate the Gaussian kernel :math:`k(\mathbf{x},\mathbf{x}')` as follows.
+
+.. math::
+
+ k(\mathbf{x},\mathbf{x}') = \exp \left[ - \frac{1}{2 \eta^2} \| \mathbf{x} -\mathbf{x}' \|^2 \right] \simeq \phi (\mathbf{x})^\top \phi(\mathbf{x}') \\
+ \phi (\mathbf{x}) = \left( z_{\omega_1, b_1} (\mathbf{x}/\eta),..., z_{\omega_l, b_l} (\mathbf{x}/\eta) \right)^\top,
+
+where :math:`z_{\omega, b} (\mathbf{x}) = \sqrt{2} \cos (\boldsymbol{\omega}^\top \mathbf{x}+b)`.
+Then, :math:`\boldsymbol{\omega}` is generated from :math:`p(\boldsymbol{\omega}) = (2\pi)^{-d/2} \exp (-\|\boldsymbol{\omega}\|^2/2)` and :math:`b` is chosen uniformly from :math:`[0, 2 \pi]`.
+This approximation is strictly valid in the limit of :math:`l \to \infty`, where the value of :math:`l` is the dimension of the random feature map.
+
+ :math:`\Phi` can be represented as a :math:`l` row :math:`n` column matrix with :math:`\phi(\mathbf{x}_i)` in each column by :math:`\mathbf{x}` vector of training data as follows:
+
+.. math::
+
+ \Phi = ( \phi(\mathbf{x}_1),..., \phi(\mathbf{x}_n) ).
+
+It is seen that the following relation is satisfied:
+
+.. math::
+
+ \mathbf{k} (\mathbf{x}) = \Phi^\top \phi(\mathbf{x}) \\
+ K= \Phi^\top \Phi.
+
+Next, a method that uses Thompson sampling to make the computation time for candidate prediction :math:`O(l)` is introduced.
+Note that using EI or PI will result in :math:`O(l^2)` because of the need to evaluate the variance.
+In order to perform Thompson sampling, the Bayesian linear model defined below is used.
+
+.. math::
+
+ y = \mathbf{w}^\top \phi (\mathbf{x}),
+
+where :math:`\phi(\mathbf{x})` is random feature map described above and :math:`\mathbf{w}` is a coefficient vector.
+In a Gaussian process, when the training data :math:`D` is given, this :math:`\mathbf{w}` is determined to follow the following Gaussian distribution.
+
+.. math::
+
+ p(\mathbf{w}|D) = \mathcal{N} (\boldsymbol{\mu}, \Sigma) \\
+ \boldsymbol{\mu} = (\Phi \Phi^\top + \sigma^2 I)^{-1} \Phi \mathbf{y} \\
+ \Sigma = \sigma^2 (\Phi \Phi^\top + \sigma^2 I)^{-1}
+
+In Thompson sampling, one coefficient vector is sampled according to this posterior probability distribution and set to :math:`\mathbf{w}^*`, which represents the acquisition function as follows
+
+.. math::
+
+ \text{TS} (\mathbf{x}) = {\mathbf{w}^*}^\top \phi (\mathbf{x}).
+
+The :math:`\mathbf{x}^*` that maximizes :math:`\text{TS} (\mathbf{x})` will be selected as the next candidate.
+In this case, :math:`\phi (\mathbf{x})` is an :math:`l` dimensional vector, so the acquisition function can be computed with :math:`O(l)`.
+
+Next, the manner for accelerating the sampling of :math:`\mathbf{w}` is introduced.
+The matrix :math:`A` is defined as follows.
+
+.. math::
+
+ A = \frac{1}{\sigma^2} \Phi \Phi^\top +I
+
+Then the posterior probability distribution is given as
+
+.. math::
+
+ p(\mathbf{w}|D) = \mathcal{N} \left( \frac{1}{\sigma^2} A^{-1} \Phi \mathbf{y}, A^{-1} \right).
+
+Therefore, in order to sample :math:`\mathbf{w}`, we need to calculate :math:`A^{-1}`.
+Now consider the case of the newly added :math:`(\mathbf{x}', y')` in the Bayesian optimization iteration.
+With the addition of this data, the matrix :math:`A` is updated as
+
+.. math::
+
+ A' = A + \frac{1}{\sigma^2} \phi (\mathbf{x}') \phi (\mathbf{x}')^\top.
+
+This update can be done using the Cholesky decomposition ( :math:`A= L^\top L` ), which reduces the time it takes to compute :math:`A^{-1}` to :math:`O(l^2)`.
+If we compute :math:`A^{-1}` at every step, the numerical cost becomes :math:`O(l^3)`.
+The :math:`\mathbf{w}` is obtained by
+
+.. math::
+
+ \mathbf{w}^* = \boldsymbol{\mu} + \mathbf{w}_0,
+
+where :math:`\mathbf{w}_0` is sampled from :math:`\mathcal{N} (0,A^{-1})` and :math:`\boldsymbol{\mu}` is calculated by
+
+.. math::
+
+ L^\top L \boldsymbol{\mu} = \frac{1}{\sigma^2} \Phi \mathbf{y}.
+
+By using these techniques, the computation time becomes almost linear in the number of training data points.
diff --git a/manual/v2.0.2/en/_sources/api/modules.rst.txt b/manual/v2.0.2/en/_sources/api/modules.rst.txt
new file mode 100644
index 00000000..0f96bbd4
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/modules.rst.txt
@@ -0,0 +1,7 @@
+physbo
+======
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo
diff --git a/manual/v2.0.2/en/_sources/api/physbo.blm.basis.fourier.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.blm.basis.fourier.rst.txt
new file mode 100644
index 00000000..f64d2599
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.blm.basis.fourier.rst.txt
@@ -0,0 +1,7 @@
+physbo.blm.basis.fourier module
+===============================
+
+.. automodule:: physbo.blm.basis.fourier
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.blm.basis.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.blm.basis.rst.txt
new file mode 100644
index 00000000..6b5d7474
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.blm.basis.rst.txt
@@ -0,0 +1,18 @@
+physbo.blm.basis package
+========================
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo.blm.basis.fourier
+
+Module contents
+---------------
+
+.. automodule:: physbo.blm.basis
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.blm.core.model.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.blm.core.model.rst.txt
new file mode 100644
index 00000000..0412df90
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.blm.core.model.rst.txt
@@ -0,0 +1,7 @@
+physbo.blm.core.model module
+============================
+
+.. automodule:: physbo.blm.core.model
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.blm.core.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.blm.core.rst.txt
new file mode 100644
index 00000000..1e5aabbd
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.blm.core.rst.txt
@@ -0,0 +1,18 @@
+physbo.blm.core package
+=======================
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo.blm.core.model
+
+Module contents
+---------------
+
+.. automodule:: physbo.blm.core
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.blm.inf.exact.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.blm.inf.exact.rst.txt
new file mode 100644
index 00000000..ac801c82
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.blm.inf.exact.rst.txt
@@ -0,0 +1,7 @@
+physbo.blm.inf.exact module
+===========================
+
+.. automodule:: physbo.blm.inf.exact
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.blm.inf.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.blm.inf.rst.txt
new file mode 100644
index 00000000..f63622cc
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.blm.inf.rst.txt
@@ -0,0 +1,18 @@
+physbo.blm.inf package
+======================
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo.blm.inf.exact
+
+Module contents
+---------------
+
+.. automodule:: physbo.blm.inf
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.blm.lik.gauss.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.blm.lik.gauss.rst.txt
new file mode 100644
index 00000000..6e8fda6b
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.blm.lik.gauss.rst.txt
@@ -0,0 +1,7 @@
+physbo.blm.lik.gauss module
+===========================
+
+.. automodule:: physbo.blm.lik.gauss
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.blm.lik.linear.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.blm.lik.linear.rst.txt
new file mode 100644
index 00000000..5d8ae3c1
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.blm.lik.linear.rst.txt
@@ -0,0 +1,7 @@
+physbo.blm.lik.linear module
+============================
+
+.. automodule:: physbo.blm.lik.linear
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.blm.lik.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.blm.lik.rst.txt
new file mode 100644
index 00000000..e1f64bfb
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.blm.lik.rst.txt
@@ -0,0 +1,19 @@
+physbo.blm.lik package
+======================
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo.blm.lik.gauss
+ physbo.blm.lik.linear
+
+Module contents
+---------------
+
+.. automodule:: physbo.blm.lik
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.blm.predictor.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.blm.predictor.rst.txt
new file mode 100644
index 00000000..e625b7f6
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.blm.predictor.rst.txt
@@ -0,0 +1,7 @@
+physbo.blm.predictor module
+===========================
+
+.. automodule:: physbo.blm.predictor
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.blm.prior.gauss.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.blm.prior.gauss.rst.txt
new file mode 100644
index 00000000..84d97f99
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.blm.prior.gauss.rst.txt
@@ -0,0 +1,7 @@
+physbo.blm.prior.gauss module
+=============================
+
+.. automodule:: physbo.blm.prior.gauss
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.blm.prior.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.blm.prior.rst.txt
new file mode 100644
index 00000000..d9a90114
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.blm.prior.rst.txt
@@ -0,0 +1,18 @@
+physbo.blm.prior package
+========================
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo.blm.prior.gauss
+
+Module contents
+---------------
+
+.. automodule:: physbo.blm.prior
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.blm.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.blm.rst.txt
new file mode 100644
index 00000000..d3e4a37f
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.blm.rst.txt
@@ -0,0 +1,30 @@
+physbo.blm package
+==================
+
+Subpackages
+-----------
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo.blm.basis
+ physbo.blm.core
+ physbo.blm.inf
+ physbo.blm.lik
+ physbo.blm.prior
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo.blm.predictor
+
+Module contents
+---------------
+
+.. automodule:: physbo.blm
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.gp.core.learning.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.gp.core.learning.rst.txt
new file mode 100644
index 00000000..5985e210
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.gp.core.learning.rst.txt
@@ -0,0 +1,7 @@
+physbo.gp.core.learning module
+==============================
+
+.. automodule:: physbo.gp.core.learning
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.gp.core.model.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.gp.core.model.rst.txt
new file mode 100644
index 00000000..36a7b16a
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.gp.core.model.rst.txt
@@ -0,0 +1,7 @@
+physbo.gp.core.model module
+===========================
+
+.. automodule:: physbo.gp.core.model
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.gp.core.prior.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.gp.core.prior.rst.txt
new file mode 100644
index 00000000..594dd681
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.gp.core.prior.rst.txt
@@ -0,0 +1,7 @@
+physbo.gp.core.prior module
+===========================
+
+.. automodule:: physbo.gp.core.prior
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.gp.core.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.gp.core.rst.txt
new file mode 100644
index 00000000..46499eb8
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.gp.core.rst.txt
@@ -0,0 +1,20 @@
+physbo.gp.core package
+======================
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo.gp.core.learning
+ physbo.gp.core.model
+ physbo.gp.core.prior
+
+Module contents
+---------------
+
+.. automodule:: physbo.gp.core
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.gp.cov.gauss.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.gp.cov.gauss.rst.txt
new file mode 100644
index 00000000..4dd7a21c
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.gp.cov.gauss.rst.txt
@@ -0,0 +1,7 @@
+physbo.gp.cov.gauss module
+==========================
+
+.. automodule:: physbo.gp.cov.gauss
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.gp.cov.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.gp.cov.rst.txt
new file mode 100644
index 00000000..61369a91
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.gp.cov.rst.txt
@@ -0,0 +1,18 @@
+physbo.gp.cov package
+=====================
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo.gp.cov.gauss
+
+Module contents
+---------------
+
+.. automodule:: physbo.gp.cov
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.gp.inf.exact.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.gp.inf.exact.rst.txt
new file mode 100644
index 00000000..34bce35f
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.gp.inf.exact.rst.txt
@@ -0,0 +1,7 @@
+physbo.gp.inf.exact module
+==========================
+
+.. automodule:: physbo.gp.inf.exact
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.gp.inf.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.gp.inf.rst.txt
new file mode 100644
index 00000000..77b4176f
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.gp.inf.rst.txt
@@ -0,0 +1,18 @@
+physbo.gp.inf package
+=====================
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo.gp.inf.exact
+
+Module contents
+---------------
+
+.. automodule:: physbo.gp.inf
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.gp.lik.gauss.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.gp.lik.gauss.rst.txt
new file mode 100644
index 00000000..19064760
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.gp.lik.gauss.rst.txt
@@ -0,0 +1,7 @@
+physbo.gp.lik.gauss module
+==========================
+
+.. automodule:: physbo.gp.lik.gauss
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.gp.lik.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.gp.lik.rst.txt
new file mode 100644
index 00000000..566be484
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.gp.lik.rst.txt
@@ -0,0 +1,18 @@
+physbo.gp.lik package
+=====================
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo.gp.lik.gauss
+
+Module contents
+---------------
+
+.. automodule:: physbo.gp.lik
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.gp.mean.const.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.gp.mean.const.rst.txt
new file mode 100644
index 00000000..f934da4a
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.gp.mean.const.rst.txt
@@ -0,0 +1,7 @@
+physbo.gp.mean.const module
+===========================
+
+.. automodule:: physbo.gp.mean.const
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.gp.mean.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.gp.mean.rst.txt
new file mode 100644
index 00000000..83cb1b7e
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.gp.mean.rst.txt
@@ -0,0 +1,19 @@
+physbo.gp.mean package
+======================
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo.gp.mean.const
+ physbo.gp.mean.zero
+
+Module contents
+---------------
+
+.. automodule:: physbo.gp.mean
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.gp.mean.zero.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.gp.mean.zero.rst.txt
new file mode 100644
index 00000000..34fef4f9
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.gp.mean.zero.rst.txt
@@ -0,0 +1,7 @@
+physbo.gp.mean.zero module
+==========================
+
+.. automodule:: physbo.gp.mean.zero
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.gp.predictor.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.gp.predictor.rst.txt
new file mode 100644
index 00000000..d036e0bb
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.gp.predictor.rst.txt
@@ -0,0 +1,7 @@
+physbo.gp.predictor module
+==========================
+
+.. automodule:: physbo.gp.predictor
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.gp.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.gp.rst.txt
new file mode 100644
index 00000000..f1aca6c3
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.gp.rst.txt
@@ -0,0 +1,30 @@
+physbo.gp package
+=================
+
+Subpackages
+-----------
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo.gp.core
+ physbo.gp.cov
+ physbo.gp.inf
+ physbo.gp.lik
+ physbo.gp.mean
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo.gp.predictor
+
+Module contents
+---------------
+
+.. automodule:: physbo.gp
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.misc.centering.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.misc.centering.rst.txt
new file mode 100644
index 00000000..5e143933
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.misc.centering.rst.txt
@@ -0,0 +1,7 @@
+physbo.misc.centering module
+============================
+
+.. automodule:: physbo.misc.centering
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.misc.gauss_elim.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.misc.gauss_elim.rst.txt
new file mode 100644
index 00000000..9594c9aa
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.misc.gauss_elim.rst.txt
@@ -0,0 +1,7 @@
+physbo.misc.gauss\_elim module
+==============================
+
+.. automodule:: physbo.misc.gauss_elim
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.misc.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.misc.rst.txt
new file mode 100644
index 00000000..0d5cc640
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.misc.rst.txt
@@ -0,0 +1,20 @@
+physbo.misc package
+===================
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo.misc.centering
+ physbo.misc.gauss_elim
+ physbo.misc.set_config
+
+Module contents
+---------------
+
+.. automodule:: physbo.misc
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.misc.set_config.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.misc.set_config.rst.txt
new file mode 100644
index 00000000..4d11fc85
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.misc.set_config.rst.txt
@@ -0,0 +1,7 @@
+physbo.misc.set\_config module
+==============================
+
+.. automodule:: physbo.misc.set_config
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.opt.adam.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.opt.adam.rst.txt
new file mode 100644
index 00000000..f7e248f1
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.opt.adam.rst.txt
@@ -0,0 +1,7 @@
+physbo.opt.adam module
+======================
+
+.. automodule:: physbo.opt.adam
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.opt.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.opt.rst.txt
new file mode 100644
index 00000000..aa7c9063
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.opt.rst.txt
@@ -0,0 +1,18 @@
+physbo.opt package
+==================
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo.opt.adam
+
+Module contents
+---------------
+
+.. automodule:: physbo.opt
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.predictor.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.predictor.rst.txt
new file mode 100644
index 00000000..ed9354f3
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.predictor.rst.txt
@@ -0,0 +1,7 @@
+physbo.predictor module
+=======================
+
+.. automodule:: physbo.predictor
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.rst.txt
new file mode 100644
index 00000000..114bf93a
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.rst.txt
@@ -0,0 +1,31 @@
+physbo package
+==============
+
+Subpackages
+-----------
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo.blm
+ physbo.gp
+ physbo.misc
+ physbo.opt
+ physbo.search
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo.predictor
+ physbo.variable
+
+Module contents
+---------------
+
+.. automodule:: physbo
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.search.discrete.policy.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.search.discrete.policy.rst.txt
new file mode 100644
index 00000000..f7940641
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.search.discrete.policy.rst.txt
@@ -0,0 +1,7 @@
+physbo.search.discrete.policy module
+====================================
+
+.. automodule:: physbo.search.discrete.policy
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.search.discrete.results.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.search.discrete.results.rst.txt
new file mode 100644
index 00000000..c85dbd53
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.search.discrete.results.rst.txt
@@ -0,0 +1,7 @@
+physbo.search.discrete.results module
+=====================================
+
+.. automodule:: physbo.search.discrete.results
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.search.discrete.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.search.discrete.rst.txt
new file mode 100644
index 00000000..97585687
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.search.discrete.rst.txt
@@ -0,0 +1,19 @@
+physbo.search.discrete package
+==============================
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo.search.discrete.policy
+ physbo.search.discrete.results
+
+Module contents
+---------------
+
+.. automodule:: physbo.search.discrete
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.search.discrete_multi.policy.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.search.discrete_multi.policy.rst.txt
new file mode 100644
index 00000000..65202559
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.search.discrete_multi.policy.rst.txt
@@ -0,0 +1,7 @@
+physbo.search.discrete\_multi.policy module
+===========================================
+
+.. automodule:: physbo.search.discrete_multi.policy
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.search.discrete_multi.results.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.search.discrete_multi.results.rst.txt
new file mode 100644
index 00000000..d12c38b5
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.search.discrete_multi.results.rst.txt
@@ -0,0 +1,7 @@
+physbo.search.discrete\_multi.results module
+============================================
+
+.. automodule:: physbo.search.discrete_multi.results
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.search.discrete_multi.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.search.discrete_multi.rst.txt
new file mode 100644
index 00000000..09a57ac1
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.search.discrete_multi.rst.txt
@@ -0,0 +1,19 @@
+physbo.search.discrete\_multi package
+=====================================
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo.search.discrete_multi.policy
+ physbo.search.discrete_multi.results
+
+Module contents
+---------------
+
+.. automodule:: physbo.search.discrete_multi
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.search.pareto.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.search.pareto.rst.txt
new file mode 100644
index 00000000..9a24fb2a
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.search.pareto.rst.txt
@@ -0,0 +1,7 @@
+physbo.search.pareto module
+===========================
+
+.. automodule:: physbo.search.pareto
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.search.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.search.rst.txt
new file mode 100644
index 00000000..a998ed95
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.search.rst.txt
@@ -0,0 +1,30 @@
+physbo.search package
+=====================
+
+Subpackages
+-----------
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo.search.discrete
+ physbo.search.discrete_multi
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ physbo.search.pareto
+ physbo.search.score
+ physbo.search.score_multi
+ physbo.search.utility
+
+Module contents
+---------------
+
+.. automodule:: physbo.search
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.search.score.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.search.score.rst.txt
new file mode 100644
index 00000000..1a6b6809
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.search.score.rst.txt
@@ -0,0 +1,7 @@
+physbo.search.score module
+==========================
+
+.. automodule:: physbo.search.score
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.search.score_multi.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.search.score_multi.rst.txt
new file mode 100644
index 00000000..59f01d6b
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.search.score_multi.rst.txt
@@ -0,0 +1,7 @@
+physbo.search.score\_multi module
+=================================
+
+.. automodule:: physbo.search.score_multi
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.search.utility.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.search.utility.rst.txt
new file mode 100644
index 00000000..14ea05b6
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.search.utility.rst.txt
@@ -0,0 +1,7 @@
+physbo.search.utility module
+============================
+
+.. automodule:: physbo.search.utility
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/api/physbo.variable.rst.txt b/manual/v2.0.2/en/_sources/api/physbo.variable.rst.txt
new file mode 100644
index 00000000..66393924
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/api/physbo.variable.rst.txt
@@ -0,0 +1,7 @@
+physbo.variable module
+======================
+
+.. automodule:: physbo.variable
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/manual/v2.0.2/en/_sources/contact.rst.txt b/manual/v2.0.2/en/_sources/contact.rst.txt
new file mode 100644
index 00000000..060d0423
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/contact.rst.txt
@@ -0,0 +1,22 @@
+Contact
+=========================================
+
+- Bug Reports
+
+  Please report all problems and bugs on the github `Issues <https://github.com/issp-center-dev/PHYSBO/issues>`_ page.
+
+ To resolve bugs early, follow these guidelines when reporting:
+
+ 1. Please specify the version of PHYSBO you are using.
+
+ 2. If there are problems for installation, please inform us about your operating system and the compiler.
+
+ 3. If a problem occurs during execution, enter the input file used for execution and its output.
+
+ Thank you for your cooperation.
+
+- Others
+
+ If you have any questions about your research that are difficult to consult at Issues on GitHub, please send an e-mail to the following address:
+
+  E-mail: ``physbo-dev__at__issp.u-tokyo.ac.jp`` (replace __at__ by @)
diff --git a/manual/v2.0.2/en/_sources/index.rst.txt b/manual/v2.0.2/en/_sources/index.rst.txt
new file mode 100644
index 00000000..62ccc38c
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/index.rst.txt
@@ -0,0 +1,19 @@
+.. PHYSBO documentation master file, created by
+ sphinx-quickstart on Tue May 26 18:44:52 2020.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Welcome to PHYSBO's documentation!
+==================================
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Contents:
+
+ introduction
+ install
+ notebook/index
+ algorithm
+ API Reference
+ acknowledgement
+ contact
diff --git a/manual/v2.0.2/en/_sources/install.rst.txt b/manual/v2.0.2/en/_sources/install.rst.txt
new file mode 100644
index 00000000..dce4561b
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/install.rst.txt
@@ -0,0 +1,125 @@
+Basic usage
+=====================
+
+Install
+---------------------
+
+Required Packages
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+* Python >= 3.6
+* numpy < 2.0.0
+* scipy
+
+Download and Install
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- From ``PyPI`` (recommended) ::
+
+ $ pip3 install physbo
+
+ - Required packages such as NumPy will also be installed at the same time.
+
+ - If you add the ``--user`` option, it will be installed under the user's home directory ::
+
+ $ pip3 install --user physbo
+
+
+- From source (for developers)
+
+ #. Download or clone the github repository
+
+ $ git clone https://github.com/issp-center-dev/PHYSBO
+
+ #. Update ``pip`` to 19.0 or higher ::
+
+ $ pip3 install -U pip
+
+ - If you don't have ``pip3``, you can install it with ``python3 -m ensurepip``.
+
+ #. Install ::
+
+ $ cd PHYSBO
+ $ pip3 install --user ./
+
+Uninstall
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+#. Execute the following command. ::
+
+ $ pip uninstall physbo
+
+
+Basic structures
+--------------------------
+
+PHYSBO has the following structure (shown up to the second level).
+
+..
+ |--physbo
+ | |--blm
+ | |--gp
+ | |--misc
+ | |--opt
+ | |--search
+ | |--predictor.py
+ | |--variable.py
+
+Each module is created with the following structure.
+
+- ``blm`` :Module for Bayesian linear model
+- ``gp`` :Module for Gaussian Process
+- ``opt`` :Module for optimization
+- ``search`` :Module for searching for optimal solutions
+- ``predictor.py`` :Abstract class for predictors
+- ``variable.py`` :Class defined for variable associations used in physbo
+- ``misc`` : Others (e.g., modules for normalizing the search space)
+
+For more information about each module, please refer to the API reference.
+
+Calculation flow
+--------------------------
+
+Bayesian optimization is well suited for optimization problems such as complex simulations or real-world experimental tasks where the objective function is very costly to evaluate.
+In PHYSBO, the following steps are used to perform the optimization (please refer to the tutorial and API reference for details on each).
+
+1. Defining the search space
+
+  Define each parameter set (d-dimensional vector) as a search candidate, where N is the number of search candidates and d is the number of input parameter dimensions. The parameter set should list all the candidates.
+
+2. Defining the simulator
+
+ For searching candidates defined above, define a simulator that gives the objective function values (values to be optimized, such as material property values) for each search candidate. In PHYSBO, the direction of optimization is to maximize the objective function, so if you want to minimize the objective function, you can do so by applying a negative value to the value returned by the simulator.
+
+3. Performing optimization
+
+ First, set the optimization policy (the search space is passed to policy as an argument at this stage). You can choose between the following two optimization methods.
+
+ - ``random_search``
+ - ``bayes_search``
+
+ In ``random_search``, we randomly select parameters from the search space and search for the largest objective function among them. It is used to prepare an initial set of parameters as a preprocessing step for Bayesian optimization. ``bayes_search`` performs Bayesian optimization. The type of score (acquisition function) in Bayesian optimization can be one of the following.
+
+  - TS (Thompson Sampling): Sample one regression function from the posterior probability distribution of the learned Gaussian process, and select the point where the predicted value becomes maximum as a next candidate.
+ - EI (Expected Improvement): Select the point where the expected value of the difference between the predicted value by the Gaussian process and the maximum value in the current situation becomes the maximum as a next candidate.
+ - PI (Probability of Improvement): Select the point with the highest probability of exceeding the current maximum of the current acquisition function as a next candidate.
+
+  Details of Gaussian processes are described in :ref:`chap_algorithm` . For other details of each method, please see `this reference <https://github.com/tsudalab/combo/blob/master/docs/combo_document.pdf>`_ .
+  If you specify the simulator and the number of search steps in these methods, the following loop will be repeated for the specified number of search steps.
+
+ i). Select the next parameter to be executed from the list of candidate parameters.
+
+ ii). Run the simulator with the selected parameters.
+
+ The number of parameter returned in i) is one by default, but it is possible to return multiple parameters in one step. For more details, please refer to the "Exploring multiple candidates at once" section of the tutorial. Also, instead of running the above loop inside PHYSBO, it is possible to control i) and ii) separately from the outside. In other words, it is possible to propose the next parameter to be executed from PHYSBO, evaluate its objective function value in some way outside PHYSBO (e.g., by experiment rather than numerical calculation), and register the evaluated value in PHYSBO. For more details, please refer to the "Running Interactively" section of the tutorial.
+
+4. Check numerical results
+
+ The search result ``res`` is returned as an object of the ``history`` class ( ``physbo.search.discrete.results.history`` ). The following is a reference to the search results.
+
+  - ``res.fx``: The logs of evaluation values of the simulator (objective function).
+ - ``res.chosen_actions``: The logs of the action ID (parameter) when the simulator has executed.
+ - ``fbest, best_action= res.export_all_sequence_best_fx()``: The logs of the best values and their action IDs (parameters) at each step where the simulator has executed.
+  - ``res.total_num_search``: Total number of steps where the simulator has executed.
+
+ The search results can be saved to an external file using the ``save`` method, and the output results can be loaded using the ``load`` method. See the tutorial for details on how to use it.
diff --git a/manual/v2.0.2/en/_sources/introduction.rst.txt b/manual/v2.0.2/en/_sources/introduction.rst.txt
new file mode 100644
index 00000000..66c72512
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/introduction.rst.txt
@@ -0,0 +1,59 @@
+Introduction
+=====================
+
+About PHYSBO
+----------------------
+
+PHYSBO (optimization tools for PHYSics based on Bayesian Optimization) is a Python library for fast and scalable Bayesian optimization. It is based on COMBO (Common Bayesian Optimization) and has been developed mainly for researchers in the materials science field. There are many attempts to accelerate scientific discovery through data-driven design-of-experiment algorithms in the fields of physics, chemistry, and materials. Bayesian optimization is an effective tool for accelerating these scientific discoveries. Bayesian optimization is a technique that can be used for complex simulations and real-world experimental tasks where the evaluation of objective function values (e.g., characteristic values) is very costly. In other words, the problem solved by Bayesian optimization is to find a parameter (e.g., material composition, structure, process and simulation parameters) with a better objective function value (e.g., material properties) in as few experiments and simulations as possible. In Bayesian optimization, the candidate parameters to be searched for are listed in advance, and the candidate with the largest objective function value is selected from among the candidates by making good use of machine learning (using Gaussian process regression) prediction. Experiments and simulations are performed on the candidates and the objective function values are evaluated. By repeating the process of selection by machine learning and evaluation by experimental simulation, we can reduce the number of times of optimization. On the other hand, Bayesian optimization is generally computationally expensive, and standard implementations such as scikit-learn are difficult to handle a large amount of data. PHYSBO achieves high scalability due to the following features
+
+- Thompson Sampling
+- random feature map
+- one-rank Cholesky update
+- automatic hyperparameter tuning
+
+Please see `this reference <https://github.com/tsudalab/combo/blob/master/docs/combo_document.pdf>`_ for technical details.
+
+Citation
+----------------------
+
+When citing PHYSBO, please cite the following reference:
+
+Yuichi Motoyama, Ryo Tamura, Kazuyoshi Yoshimi, Kei Terayama, Tsuyoshi Ueno, Koji Tsuda,
+Bayesian optimization package: PHYSBO,
+Computer Physics Communications Volume 278, September 2022, 108405. Available from https://www.sciencedirect.com/science/article/pii/S0010465522001242?via%3Dihub (open access).
+
+Bibtex is given as follows: ::
+
+ @article{MOTOYAMA2022108405,
+ title = {Bayesian optimization package: PHYSBO},
+ journal = {Computer Physics Communications},
+ volume = {278},
+ pages = {108405},
+ year = {2022},
+ issn = {0010-4655},
+ doi = {https://doi.org/10.1016/j.cpc.2022.108405},
+ author = {Yuichi Motoyama and Ryo Tamura and Kazuyoshi Yoshimi and Kei Terayama and Tsuyoshi Ueno and Koji Tsuda},
+ keywords = {Bayesian optimization, Multi-objective optimization, Materials screening, Effective model estimation}
+ }
+
+Main Developers
+----------------------
+
+- ver. 1.0-
+
+ - Ryo Tamura (International Center for Materials Nanoarchitectonics, National Institute for Materials Science)
+ - Tsuyoshi Ueno (Magne-Max Capital Management Company)
+ - Kei Terayama (Graduate School of Medical Life Science, Yokohama City University)
+ - Koji Tsuda (Graduate School of Frontier Sciences, The University of Tokyo)
+ - Yuichi Motoyama (The Institute for Solid State Physics, The University of Tokyo)
+ - Kazuyoshi Yoshimi (The Institute for Solid State Physics, The University of Tokyo)
+ - Naoki Kawashima (The Institute for Solid State Physics, The University of Tokyo)
+
+License
+----------------------
+
+PHYSBO v2 is distributed under `the Mozilla Public License version 2.0 (MPL v2) <https://www.mozilla.org/en-US/MPL/2.0/>`_.
+
+Copyright (c) <2020-> The University of Tokyo. All rights reserved.
+
+Part of this software is developed under the support of "Project for advancement of software usability in materials science" by The Institute for Solid State Physics, The University of Tokyo.
diff --git a/manual/v2.0.2/en/_sources/notebook/index.rst.txt b/manual/v2.0.2/en/_sources/notebook/index.rst.txt
new file mode 100644
index 00000000..1374b15b
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/notebook/index.rst.txt
@@ -0,0 +1,14 @@
+Tutorials
+==================================
+Here, the usage of PHYSBO is introduced through tutorials.
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Contents:
+
+ tutorial_basic
+ tutorial_Gaussian_process
+ tutorial_interactive_mode
+ tutorial_once_mode
+ tutorial_multi_probe
+ tutorial_multi_objective
diff --git a/manual/v2.0.2/en/_sources/notebook/tutorial_Gaussian_process.ipynb.txt b/manual/v2.0.2/en/_sources/notebook/tutorial_Gaussian_process.ipynb.txt
new file mode 100644
index 00000000..2be47fc7
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/notebook/tutorial_Gaussian_process.ipynb.txt
@@ -0,0 +1,780 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
+ "source": [
+ "# Gaussian process\n",
+ "\n",
+ "PHYSBO performs Bayesian optimization while running Gaussian process regression.\n",
+ "\n",
+ "Therefore, it is possible to run Gaussian process regression given training data, and to predict test data using the trained model.\n",
+ "\n",
+ "In this section, the procedure is introduced.\n",
+ "\n",
+ "\n",
+ "## Preparation of search candidate data\n",
+ "\n",
+ "In this tutorial, the problem of finding a stable interface structure for Cu is used as an example. The values that have already been evaluated are used, although the evaluation of the objective function, i.e., the structural relaxation calculation, actually takes on the order of several hours per calculation. For more information on the problem setup, please refer to the following references\n",
+ "\n",
+ "- S. Kiyohara, H. Oda, K. Tsuda and T. Mizoguchi, “Acceleration of stable interface structure searching using a kriging approach”, Jpn. J. Appl. Phys. 55, 045502 (2016).\n",
+ "\n",
+ "\n",
+ "Save the dataset file [s5-210.csv](https://raw.githubusercontent.com/issp-center-dev/PHYSBO/master/examples/grain_bound/data/s5-210.csv) into the subdirectory `data`, and load dataset from this file as the following:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:11:41.987250Z",
+ "start_time": "2020-12-04T06:11:41.537168Z"
+ },
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "import physbo\n",
+ "\n",
+ "import numpy as np\n",
+ "\n",
+ "def load_data():\n",
+ " A = np.asarray(np.loadtxt('data/s5-210.csv',skiprows=1, delimiter=',') )\n",
+ " X = A[:,0:3]\n",
+ " t = -A[:,3]\n",
+ " return X, t\n",
+ "\n",
+ "X, t = load_data()\n",
+ "X = physbo.misc.centering( X )\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
+ "source": [
+ "## Defining training data\n",
+ "\n",
+ "A randomly selected 10% of the target data will be used as training data, and another randomly selected 10% will be used as test data."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:11:51.077070Z",
+ "start_time": "2020-12-04T06:11:51.072211Z"
+ },
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Ntrain = 1798\n",
+ "Ntest = 1798\n"
+ ]
+ }
+ ],
+ "source": [
+ "N = len(t)\n",
+ "Ntrain = int(N*0.1)\n",
+ "Ntest = min(int(N*0.1), N-Ntrain)\n",
+ "\n",
+ "id_all = np.random.choice(N, N, replace=False)\n",
+ "id_train = id_all[0:Ntrain]\n",
+ "id_test = id_all[Ntrain:Ntrain+Ntest]\n",
+ "\n",
+ "X_train = X[id_train]\n",
+ "X_test = X[id_test]\n",
+ "\n",
+ "t_train = t[id_train]\n",
+ "t_test = t[id_test]\n",
+ "\n",
+ "print(\"Ntrain =\", Ntrain)\n",
+ "print(\"Ntest =\", Ntest)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
+ "source": [
+ "## Learning and Prediction of Gaussian Processes\n",
+ "\n",
+ "The following process is used to learn the Gaussian process and predict the test data.\n",
+ "\n",
+ "1. Generate a model of the Gaussian process\n",
+ "\n",
+ "2. The model is trained using X_train (parameters of the training data) and t_train (objective function value of the training data).\n",
+ "\n",
+ "3. Run predictions on the test data (X_test) using the trained model.\n",
+ "\n",
+ "Definition of covariance (Gaussian)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:11:55.403677Z",
+ "start_time": "2020-12-04T06:11:55.399915Z"
+ },
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "cov = physbo.gp.cov.gauss( X_train.shape[1],ard = False )"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
+ "source": [
+ "Definition of mean value"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:11:56.279543Z",
+ "start_time": "2020-12-04T06:11:56.277082Z"
+ },
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "mean = physbo.gp.mean.const()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
+ "source": [
+ "Definition of likelihood function (Gaussian)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:11:57.077507Z",
+ "start_time": "2020-12-04T06:11:57.075581Z"
+ },
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "lik = physbo.gp.lik.gauss()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
+ "source": [
+ "Generation of a Gaussian Process Model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:11:57.832602Z",
+ "start_time": "2020-12-04T06:11:57.828902Z"
+ },
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "gp = physbo.gp.model(lik=lik,mean=mean,cov=cov)\n",
+ "config = physbo.misc.set_config()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
+ "source": [
+ "Learning a Gaussian process model."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:12:58.218792Z",
+ "start_time": "2020-12-04T06:11:58.261609Z"
+ },
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Start the initial hyper parameter searching ...\n",
+ "Done\n",
+ "\n",
+ "Start the hyper parameter learning ...\n",
+ "0 -th epoch marginal likelihood 17312.31220145003\n",
+ "50 -th epoch marginal likelihood 6291.292745798703\n",
+ "100 -th epoch marginal likelihood 3269.1167759139516\n",
+ "150 -th epoch marginal likelihood 1568.3930580794922\n",
+ "200 -th epoch marginal likelihood 664.2847129159145\n",
+ "250 -th epoch marginal likelihood -249.28468708456558\n",
+ "300 -th epoch marginal likelihood -869.7604930929888\n",
+ "350 -th epoch marginal likelihood -1316.6809532065581\n",
+ "400 -th epoch marginal likelihood -1546.1623851368954\n",
+ "450 -th epoch marginal likelihood -1660.7298135295766\n",
+ "500 -th epoch marginal likelihood -1719.5056128528097\n",
+ "Done\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "gp.fit(X_train, t_train, config)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
+ "source": [
+ "Output the parameters in the learned Gaussian process."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:12:58.227479Z",
+ "start_time": "2020-12-04T06:12:58.221821Z"
+ },
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\n",
+ "\n",
+ "likelihood parameter = [-2.81666924]\n",
+ "mean parameter in GP prior: [-1.05939674]\n",
+ "covariance parameter in GP prior: [-0.91578975 -2.45544347]\n",
+ "\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "gp.print_params()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
+ "source": [
+ "Calculating the mean (predicted value) and variance of the test data"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:12:58.605713Z",
+ "start_time": "2020-12-04T06:12:58.244883Z"
+ },
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "gp.prepare(X_train, t_train)\n",
+ "fmean = gp.get_post_fmean(X_train, X_test)\n",
+ "fcov = gp.get_post_fcov(X_train, X_test)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
+ "source": [
+ "Results of prediction"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:12:58.618218Z",
+ "start_time": "2020-12-04T06:12:58.607794Z"
+ },
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([-1.00420815, -1.10923758, -0.97840623, ..., -1.00323733,\n",
+ " -0.97015759, -1.11076236])"
+ ]
+ },
+ "execution_count": 10,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "fmean"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
+ "source": [
+ "Results of covariance"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:12:58.628483Z",
+ "start_time": "2020-12-04T06:12:58.622345Z"
+ },
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([0.00056069, 0.00075529, 0.00043006, ..., 0.0016925 , 0.00070103,\n",
+ " 0.00073499])"
+ ]
+ },
+ "execution_count": 11,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "fcov"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
+ "source": [
+ "Output mean square error of prediction"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:12:58.636081Z",
+ "start_time": "2020-12-04T06:12:58.631461Z"
+ },
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.008107085662147708"
+ ]
+ },
+ "execution_count": 12,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "np.mean((fmean-t_test)**2)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
+ "source": [
+ "## Prediction by trained models\n",
+ "\n",
+ "Read the parameters of the trained model as gp_params and make predictions using them.\n",
+ "\n",
+ "By storing gp_params and training data (X_train, t_train), prediction by the trained model is possible."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
+ "source": [
+ "Prepare the learned parameters (must be done immediately after learning)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:12:58.645968Z",
+ "start_time": "2020-12-04T06:12:58.639012Z"
+ },
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([-2.81666924, -1.05939674, -0.91578975, -2.45544347])"
+ ]
+ },
+ "execution_count": 13,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "#Prepare the learned parameters as a 1D array\n",
+ "gp_params = np.append(np.append(gp.lik.params, gp.prior.mean.params), gp.prior.cov.params)\n",
+ "\n",
+ "gp_params"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
+ "source": [
+ "Prepare a model similar to the one used for training as gp"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:12:58.666019Z",
+ "start_time": "2020-12-04T06:12:58.653259Z"
+ },
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "#Definition of covariance (Gaussian)\n",
+ "cov = physbo.gp.cov.gauss( X_train.shape[1],ard = False )\n",
+ "\n",
+ "#Definition of mean value\n",
+ "mean = physbo.gp.mean.const()\n",
+ "\n",
+ "#Definition of likelihood function (Gaussian)\n",
+ "lik = physbo.gp.lik.gauss()\n",
+ "\n",
+ "#Generation of a Gaussian Process Model\n",
+ "gp = physbo.gp.model(lik=lik,mean=mean,cov=cov)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
+ "source": [
+ "Prepare a model similar to the one used for training as gp"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:12:59.016429Z",
+ "start_time": "2020-12-04T06:12:58.673034Z"
+ },
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "#Input learned parameters into the Gaussian process.\n",
+ "gp.set_params(gp_params)\n",
+ "\n",
+ "\n",
+ "#Calculate the mean (predicted value) and variance of the test data\n",
+ "gp.prepare(X_train, t_train)\n",
+ "fmean = gp.get_post_fmean(X_train, X_test)\n",
+ "fcov = gp.get_post_fcov(X_train, X_test)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
+ "source": [
+ "Results of prediction"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:12:59.020795Z",
+ "start_time": "2020-12-04T06:12:59.017606Z"
+ },
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([-1.00420815, -1.10923758, -0.97840623, ..., -1.00323733,\n",
+ " -0.97015759, -1.11076236])"
+ ]
+ },
+ "execution_count": 16,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "fmean"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
+ "source": [
+ "Results of covariance"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:12:59.026523Z",
+ "start_time": "2020-12-04T06:12:59.023035Z"
+ },
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([0.00056069, 0.00075529, 0.00043006, ..., 0.0016925 , 0.00070103,\n",
+ " 0.00073499])"
+ ]
+ },
+ "execution_count": 17,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "fcov"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
+ "source": [
+ "Output mean square error of prediction"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:12:59.033497Z",
+ "start_time": "2020-12-04T06:12:59.027871Z"
+ },
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.008107085662147708"
+ ]
+ },
+ "execution_count": 18,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "np.mean((fmean-t_test)**2)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [],
+ "metadata": {
+ "collapsed": false,
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "outputs": [],
+ "source": [
+ "Note: In the example above, we used the same pre-registered X to make predictions.\n",
+ "If you want to make predictions for parameters X_new that is not included in X using the trained model,\n",
+ "you first obtain the mean (X_{mean}) and standard deviation (X_{std}) of the data X and\n",
+ "then normalize X_{new} by (X_{new} - X_{mean}) / X_{std}.\n",
+ "Also, the data format for X is ndarray format.\n",
+ "Therefore, if X_{new} is a single data, it must be transformed to ndarray format.\n",
+ "For example, if X_{new} is a real number, you should replace X_new as\n",
+ "X_new = np.array(X_new).reshape(1)"
+ ],
+ "metadata": {
+ "collapsed": false,
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ }
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.8.5"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
\ No newline at end of file
diff --git a/manual/v2.0.2/en/_sources/notebook/tutorial_basic.ipynb.txt b/manual/v2.0.2/en/_sources/notebook/tutorial_basic.ipynb.txt
new file mode 100644
index 00000000..817c2a53
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/notebook/tutorial_basic.ipynb.txt
@@ -0,0 +1,590 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Basic usage of PHYSBO\n",
+ "\n",
+ "## Introduction\n",
+ "\n",
+ "In this tutorial, we will introduce how to define the simulator class and find the minimum value of a one-dimensional function using PHYSBO.\n",
+ "\n",
+ "First, we will import PHYSBO."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-03-05T04:50:30.091028Z",
+ "start_time": "2021-03-05T04:50:29.600019Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "import physbo"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Defining the search space\n",
+ "\n",
+ "In the following example, the search space ``X`` is defined as a grid chopped by ``window_num=10001`` divisions from ``x_min = -2.0`` to ``x_max = 2.0``.\n",
+ "Note that ``X`` must be in ``window_num`` x ``d`` ndarray format (``d`` is the number of dimensions, in this case one), so we use reshape to transform it."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-03-05T04:50:30.097211Z",
+ "start_time": "2021-03-05T04:50:30.092637Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "#In\n",
+ "import numpy as np\n",
+ "import scipy\n",
+ "import physbo\n",
+ "import itertools\n",
+ "\n",
+ "#In\n",
+ "#Create candidate\n",
+ "window_num=10001\n",
+ "x_max = 2.0\n",
+ "x_min = -2.0\n",
+ "\n",
+ "X = np.linspace(x_min,x_max,window_num).reshape(window_num, 1)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Defining the simulator class\n",
+ "\n",
+ "Here, we define the simulator class to set as the objective function.\n",
+ "\n",
+ "In this case, the problem is to find the minimum $x$ such that $f(x) = 3 x^4 + 4 x ^3 + 1.0$ (the answer is $x=-1.0$).\n",
+ "\n",
+ "In the simulator class, we define the ``__call__`` function (or ``__init__`` if there are initial variables, etc.).\n",
+ "The action indicates the index number of the grid to be retrieved from the search space, and is generally in the form of an ndarray so that multiple candidates can be calculated at once.\n",
+ "In this case, we choose one candidate point from ``X`` as ``action_idx=action[0]`` to calculate only one candidate at a time.\n",
+ "Since **PHYSBO is designed to find the maximum value of the objective function**, it returns the value of f(x) at the candidate point multiplied by -1."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-03-05T04:50:30.104491Z",
+ "start_time": "2021-03-05T04:50:30.099622Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "# Declare the class for calling the simulator.\n",
+ "class simulator:\n",
+ "\n",
+ " def __call__(self, action ):\n",
+ " action_idx = action[0]\n",
+ " x = X[action_idx][0]\n",
+ " fx = 3.0*x**4 + 4.0*x**3 + 1.0\n",
+ " fx_list.append(fx)\n",
+ " x_list.append(X[action_idx][0])\n",
+ "\n",
+ " print (\"*********************\")\n",
+ " print (\"Present optimum interactions\")\n",
+ "\n",
+ " print (\"x_opt=\", x_list[np.argmin(np.array(fx_list))])\n",
+ "\n",
+ " return -fx"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Performing optimization"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Setting policy\n",
+ "\n",
+ "First, set the optimization `policy`. \n",
+ "\n",
+ "Next, set `test_X` to the matrix of search candidates (`numpy.array`)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-03-05T04:50:30.117205Z",
+ "start_time": "2021-03-05T04:50:30.108470Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "# set policy \n",
+ "policy = physbo.search.discrete.policy(test_X=X)\n",
+ "\n",
+ "# set seed\n",
+ "policy.set_seed(0)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "When `policy` is set, no optimization is done yet.\n",
+ "Execute the following methods on `policy` to optimize it.\n",
+ "\n",
+ "- `random_search`. \n",
+ "- `bayes_search`.\n",
+ "\n",
+ "If you specify the `simulator` and the number of search steps in these methods, the following loop will be executed for the number of search steps.\n",
+ "\n",
+ "i) Select the next parameter to be executed from the candidate parameters.\n",
+ "\n",
+ "ii) Execute `simulator` with the selected parameters.\n",
+ "\n",
+ "The default number of parameters returned by i) is one, but it is possible to return multiple parameters in one step.\n",
+ "See the section \"Searching for multiple candidates at once\" for details. \n",
+ "\n",
+ "Also, instead of running the above loop inside PHYSBO, it is possible to control i) and ii) separately from the outside. In other words, it is possible to propose the next parameter to be executed from PHYSBO, evaluate its objective function value in some way outside PHYSBO (e.g., by experiment rather than numerical calculation), propose it in some way outside PHYSBO, and register the evaluated value in PHYSBO. For more details, please refer to the \"Running Interactively\" section of the tutorial.\n",
+ "\n",
+ "### Random Search\n",
+ "\n",
+ "First of all, let's perform a random search.\n",
+ "\n",
+ "Since Bayesian optimization requires at least two objective function values to be obtained (the initial number of data required depends on the problem to be optimized and the dimension d of the parameters), we will first perform a random search. \n",
+ "\n",
+ "**argument**. \n",
+ "\n",
+ "- `max_num_probes`: Number of search steps. \n",
+ "- `simulator`: The simulator of the objective function (an object of class simulator). "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-03-05T04:50:30.149378Z",
+ "start_time": "2021-03-05T04:50:30.120660Z"
+ },
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "fx_list=[]\n",
+ "x_list = []\n",
+ "\n",
+ "res = policy.random_search(max_num_probes=20, simulator=simulator())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "When executed, the objective function value and its action ID for each step, and the best value up to now and its action ID will be printed as follows.\n",
+ "\n",
+ "````\n",
+ "0020-th step: f(x) = -19.075990 (action=8288)\n",
+ " current best f(x) = -0.150313 (best action=2949) \n",
+ "````\n",
+ "\n",
+ "\n",
+ "### Bayesian Optimization\n",
+ "\n",
+ "Next, we run the Bayesian optimization as follows.\n",
+ "\n",
+ "**argument**. \n",
+ "\n",
+ "- `max_num_probes`: Number of search steps. \n",
+ "- `simulator`: The simulator of the objective function (an object of class simulator). \n",
+ "- `score`: The type of acquisition function. You can specify one of the following\n",
+ " - TS (Thompson Sampling) \n",
+ " - EI (Expected Improvement) \n",
+ " - PI (Probability of Improvement) \n",
+ "- `interval`: \n",
+ "The hyperparameters are trained at the specified interval. \n",
+ "If a negative value is specified, no hyperparameter will be learned. \n",
+ "0 means that hyperparameter learning will be performed only in the first step. \n",
+ "- `num_rand_basis`: Number of basis functions. 0 means that a normal Gaussian process without Bayesian linear model will be used. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-03-05T04:50:32.482575Z",
+ "start_time": "2021-03-05T04:50:30.151250Z"
+ },
+ "code_folding": [],
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "res = policy.bayes_search(max_num_probes=50, simulator=simulator(), score='TS', \n",
+ " interval=0, num_rand_basis=500)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Checking the results\n",
+ "\n",
+ "The search result ``res`` is returned as an object of the ``history`` class (`physbo.search.discrete.results.history`). \n",
+ "The following is a reference to the search results.\n",
+ "\n",
+ "- `res.fx` : The history of evaluated values of simulator (objective function).\n",
+ "- `res.chosen_actions`: The history of action IDs (parameters) when the simulator was evaluated. \n",
+ "- `fbest, best_action= res.export_all_sequence_best_fx()`: The history of best values and their action IDs (parameters) for all timings when the simulator was evaluated.\n",
+ "- `res.total_num_search`: Total number of simulator evaluations.\n",
+ "\n",
+ "Let's plot the objective function value and the best value at each step. \n",
+ "`res.fx` and `best_fx` should range up to `res.total_num_search`, respectively."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-03-05T04:50:32.747272Z",
+ "start_time": "2021-03-05T04:50:32.484125Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "%matplotlib inline"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-03-05T04:50:32.897286Z",
+ "start_time": "2021-03-05T04:50:32.754938Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[]"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXMAAAD3CAYAAADv7LToAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8QVMy6AAAACXBIWXMAAAsTAAALEwEAmpwYAAAp40lEQVR4nO3deXDkZ33n8fe3b90zkua0RzPjA7AxGOzBB2BsbHMkBJJyKnFBWEgKMJvKVkLNVrZqQ4VcewaTbLHxkji3QxYvSbFZAwXB9mLHxh6D7QDxwYJtPJ6x55BGmtHV9+/ZP379a0mtbvWhnpl+NJ9XlcpWS9N6pNF89NX3ucw5h4iI+C12tgcgIiLrpzAXEdkAFOYiIhuAwlxEZANQmIuIbACJs/FBx8fH3Z49e87GhxYR8dYTTzwx5ZzbUu9tZyXM9+zZw+OPP342PrSIiLfM7GCjt6nNIiKyASjMRUQ2AIW5iMgGoDAXEdkAFOYiIhuAwlxEZAPoapib2X4zO2Bm+7v5vCIisraurTM3s13AbufcNWb2FTO7xzn3XLeeX86MIHBMzec5cipXecmyZSjNe163AzOr+2emFwoAjA6kWvoYU/N5DrxwgkPTWXaMZNgxkmHnpj62j2RIxlurL5xzvDC1wGMvTJOIG5fuGOairYNkkvEVn8vR2RyHphc5mS0ynyuxUCgxlyuRL5ZXPWc6GSediNGXipNJxBlIJxjpS7KpP8lIX5J0Isb0QoHJuTyT83km5/KkEzFG+lNs7k+yqS9FOhkjVyyTKwZki2WyhXLl9TLZyuOBcyTjRjIeIxGPkYgZpXJAsewolgNKgaMcOJyDwDmcc8RixmA6wVAmwWA6yUA6Tr4UVD+n+XyJYik8ztoMDIjFDDOImxEzIxYz+lNxhjNJhvsSDGeSpBIxTmWLnFwscnKxwKlskVLgCAJHUPn4/ak452/u5/zNfewa7Wdzf5JT2SKHZ7K8fDLLKyezxMzYNpxm63CGrUNphtJJJufzHJvNcWw2x/G5PIFzpOIx0okYqUSMmBmBc5QDKAcB5cBRChzFsgu/HoED58CMmIFhxGOQSsTIVP6u0ok4fak4A6kE/enwv4m4kS2EX+/FQplsoUSuGFT/HvKlYNXnWO8o8NGBFHu3DLJ3bIDzNvcRjxnz+RIvTi1w8MQih2cWSSdijA6mGR9IMTqYIm7G1HyBqfk8J+bznMwWSSVi9CXj4Usqzq7Rfq6Y2NzS93k7urlp6J3A3WZ2JbAVuA6ohrmZ3QbcBjAxMdHFD3t6BJVvrFSi9V9evv7UEe569CB/85GricfqB18vc85x8x8+yAuTC6ve9ujVJ/id972WRE3YPvL8FL/yt0/igP926xu44dVb6z7vo8+f4N5nj/Ho8yf4wdG5uh8/ETM+9rYL+LfveNWqjwOQK5b5yveP8K3npnjk+SmOzeZXvD0eMy7cMsD2kT5enlnk0EyWQilo+Pku/9mkY/1bl4gZpWDjfcHW+n5IxWMMZhLVwmU93nf5zp4P863Ad4FPEYb2u5e/0Tl3J3AnwL59+7r2nXB8LsfJxSIXbhnsaoDu/+J3+cYzx7j5km289/KdvO1V46QT8TX/zEM/muKR50/w5EszvGnPaNfGcqbkSwEvTC7wE5dt55YrzmfHSIbtIxn+7KEf88cPPs8rJ7P80QeuYCCdwDnHXz/yIr/31We5YHyAeMz4pb/6Dr9208X86o0XE6v8Xfzo2By/+5VneOhHU6QTMd60Z5Rff9dO3nzhGBduHeT4bJ4jp8Lq7sAL03zuged58uAM//39b2TrcKY6tod+NMlv/sNTvHhikbGBFNdeOMabLxzn2gvHcM7x7JE5nj0yy7NHZjk2l+PirUPcfMk2Jsb6mRjtZ3N/qlLVJhjMJFb9XTrnKJQDcsWAfKWKns+XOLVY5FQ2fMkVy4wOptkymGbLUJrxwRTFsuPkYoGTleo2VyxXK7BMMk4mGVZl
mUpllknGMYNSEFafhXJYkSZiMVLxGIm4kYhbtZo2AzOjHDjm82EFPp8L/5tOxKqfz2A6QSoew1U+F0dUcYb/DRyUA0e2UGY2F34+s9kihVLASOW3ik39SYb7kqTiMWJG9ePP5Uu8PJPl0PQih2eyHJ/LMz6Y4vzNfezc1Md5m/pwEFbgs3mOz+WYy5XYMpRm61CmWrHHzSiUAwql8PMOAkc8ZksvFn7uiVjl6xCz6m+DbtnnUCiHf0f5UlD9jWexUGYhX2KxUKZYDqp/B/2ppa99OhknU6nq632Na78fJufz/HhygRdPLPDjqUVOZQvsGu1nz9gAu8f62TXaT6EUML1Q4MR8gemFAmXnGB9IMT6UZmwgxab+FMXy0jizhTLp5No50inr1k1DZvZRIHoZAF7nnPuzeu+7b98+163t/Lf+yaM89uNpBlJxXn/+Jt4wsYk3XzjGdRfXPb6gZbf8j2/x46mwQp1ZLDKUSfBTr9/J77zvtQ2r9Y/+9Xe479nj3Pa2C/iNn7xkXR//bDg+m+Oq/3Q/v/czl/Gvrtm94m1/+9hBfvMfnuKSHcP88Qev5LP3/4i/e+Iw77h0G3946xuIm/HJf/gXvvTky1z/qi38zvtey1898iJ/c+Ag/ak4n7j5VfzC1RMr2iD1fOnJw3zyfz/FQDrBZ9//Bi7aMsjvffVZvvy9V9gz1s9vv++1vO3iLdUfFiLnEjN7wjm3r97bulmZ3wu82zn3lJn9V+CuLj53QycXi7xm+xBX7R3lu4dO8qf/9AKfe+B5Dvz7m9g+kmn+BA1kiwFX7h7lcx+8gm89N8XnD7zEF779Ej+/73ze2OBXpKOzOQDue+ZYT4b5d16cZiCV4NKdw3XfPpsrATCcWf1t8QtX72bnSB+/8j+f5PpPf5PAwa/edDGfuGmpCv/Mz13Ovt2j/PY9T3PD7Q8QM3j/VRPsf8erGBtMtzTGW644n8vOG+GXP/8EH/yzx+hPJSiUAj5x88X86+svbPrDQORc1bUwd84drKxkeQz4snPu6W4991qKQcClO4b53Z++DIB7nznGx+56nGOzuXWFeb5YJpOMkYzHuOHVW9k6lOG+Z49x5FSONzb4M0dP5ehLxnlhaoHnjs9z0dbBjj9+twWB45c//wSXn7+JP//FN9V9n9lcEYDhTLLu29/+mq188ePX8h+++gwfvnYPP/G6HSvebmZ84OoJLjtvmC98+xAfunY3l+yo/4NjLa/aNsQ9/+at/PY9TzOzWOCT77mUveMDbT+PyLmkq6cmOuduB27v5nM2UywHpJZNlo0PhisqTizkG/2RlkS9z8jOTeEPhldOZuu+f6EUMDVf4NZ9u/hfjx/i3meOnbYwL5UDfvZzj/CrN13MTZdsa+nPfPfwSabmC8wsNp7AmYsq877G3xaXnTfC3bddu+bHev35m3j9+ZtaGlcjA+kEn/65y9f1HCLnEu83DRVLjkR8qX86Xvl1fmp+fbPO2WJ5xa/0I31J+pJxjpzK1X3/43Ph42+c2MRl5w1z37PH1vXx13JiocD3Dp/ioR9Ntfxn7q+MJ2ql1DObDSvzoQaVuYj0Lv/DvBysWJs8FlXm6wzzXDGgL7UU5mbGjk0ZjpyqX5kfrYT8tpEM77hkO0++NMPk3Pp+O2gkWh4VTdC24v5njwNLgV1PtTJXmIt4Z8OFeX8qQV8yzon5zoPUORdW5jWrVnaO9DWszKPJzx0jGW6+dCvOwTd/cLzjMaxlphLmL55oLcwPzyzyg6NzDKTinFojzKOe+VCdCVAR6W0bIMxXb+wZG0xxYh2L+/OVjSa160F3jGQ4crJBmFdCfvtwhkt3DHPepj6+8czpabVMV/reh2eyFMuNN8VEoqr8J163o7o2t565XJF4ZZegiPhlA4R5QKJmzfHYYJqpdVTm+WIYkH21Yb6pj+NzOUp1AvToqVy4tbsviZlx8yVbefi5SbKF+sG5HlFlXg4ch2fqt32Wu+/ZY1ywZYDLzx8B
ltoptWazJYYyiYbb9kWkd3kd5tGW+9rzPMYHUuvqmWcrlWvtmuadIxkCB8fq9MKPzubYMZKpBuE7Lt1Orhjw8HOtT1K2anphqVXyYpO++Xy+xGMvTHPzJdsY7gt74VE7pdZcrqh+uYinvA7zYhBWyPXbLJ1X5lEboi+18nl3bOoD4Eid5YnHZnNsW7b9/OoLRhnKJLivTqvlxHyef3z6KP/hK8/w03/0MFf83r1NQ3m5mcVC9eiCZpOgD/1wkkI54KbXbK0GdaO++WyupH65iKe8/pdbKodHESTjq9ssJ+YLOOc6ahlUK/PE6soc4JU6k6BHTuW4cvfSztBos9H9PzhGOXD88Ngc//j0Ub7x9DGeOTILhD+EXn/eCNMLBb7xzFFue9uFLY1veqHA+Zv7mJ4vNJ0Eve/Z44z0Jbly92a+d/gk0HhFiypzEX95HebR5F8iVlOZD6QoBY7ZbImR/vbDKdegzdKoMnfOcXw2v2rH6Tsu3caXv/cKb/4v93NsNo8ZXDmxmV9/16u5au8orz9/hHQizs1/8CAPP3ei5TCfWSwwOhAeHPXiicWG71cOHN/8f8d5+6u3kIiH/XxovNZ8Nlti91h/S2MQkd7idZgXKmGerGmzRBuHTizkOwzz8Hlrwzw6T7p2eeL0QoFCOWD78Mowf/urt/Ca7UNsH8nwiZu3c9Ml4bEAtd560Th3f+clcjUblRqZXiiwfThDXyrO9w+favh+3z00w/RCobpLNKq616zM+1SZi/jI7555pc2SWtVmibb0dzYJulSZr/7y7BzpW7Wl/8iyZYnLDWWSfP0Tb+Ovfukq3n/VRN0gB7ju4nFyxYAnD860NL6ZhQKbB1LsHR/g8MxiwzO773v2OImYcf2rwxMko6BWz1xk4/E6zKMlgrWrWaIbbzrdOLQ0Abq6Sg53ga6szI9VNgx1erDX1ReMkYgZD7W48mW60mbZMzZA4ODQTP1Wy/3PHuOqvaPVijyTjJNKxOquZonOy1bPXMRPXod5sUGYr/d8lkYToAA7RvpWbemvVuYdhvlgOsEbJzbxrRbCPLyGLGBTf5I9lZME662EOTS9yA+Pza86iGs4k2Q2u7pnPl/po6syF/GT12FeKNVfzbK5f33nszTqmUO4omVqvkC+tLQZ6NhsjpjBlhbP7K7nrRdt4V9ePlXdENRItPtztD9VPRa23iRo9IPh+leNr3h8uC9Rt2dePf5WPXMRL3kd5o0q81RlJ2ana82rbZY6YR6taDm6rNVy9FSOLUPpuvdWtuqtF4/hHDzy/Ik13y8K+80D4SXCQ5lE3cr8wAsn2DKU5sItK4/hHc4k67ZZ5ta4mEJEet+GDHOobBxaZ5slXWcCdEe01nzZGS1HZ3OrJj/bdfn5mxhKJ3j4uck13y86MXF0IIWZsXd8YNVac+ccj75wgmsuGFu1zn6kL7l2Za6euYiXPA/zqM2y+tMYH+j8fJZ8sYwZpOvc9RmF+fK++dFT67vVCCARj3HNhWNNt/9Hl0tEraQ9YwOrdoG+eGKRY7N5rrlg9aXSw33JuuvM56o9c4W5iI88D/OoMl+9y3M9JyeGx9/G6+4e3TFS2Th0qruVOYTrzQ9NZzm4xq7O5ZU5wJ7xAV45mV3Rwz/wQtiqueaCsVV/fjiTqLs0MarW17plSER61wYJ80Ztlk575kHdNeYQLlfc3J+srjVfyJeYy5XYts7KHOCtF4eTlWvdIDSzUMCM6m7OveP94fLE6aXfFB59/gRbh9JcUOfezKjN4pxb8fhcTrcMifhs44b5QJqZxWLd42qbqb3/s9aOZZdULL+UYr0uGB9g50iGh9cI8+nFApv6ktWDtnaPrVye6JzjQIN+OYRtllLgqvMCkVktTRTxmudhXtkBmlgdWtHFzjOLjW/WaaT2/s9aOzdlqpX5sei6uC60WcyMt1w0ziPPT1EOXN33mVkosrnSYgHYG4V5pTXzwtQCx+fydVssQMOTE+dyRfqS8bo/GEWk
93n9L3ftNsvS+SztCtssrVXmjbbyd+qtF48zmyvxLy/XP3NleqHAaP9SmG8eSDHSl6xOgkb98msvbBDmlZ547cah2WxJ/XIRj22IMK+3vntsoPONQ+GBV42/NDs2ZTiVLbJYKFXbLOtdzRJ5y0Vh3/zhH9VfojizWFhRmUM4CXqwsnHowAvTbBtOs6fB6YcjDS6omMsX1S8X8ZjXYV5ocJ45LB221cnyxGanF+6srGh55WSOY7M5hjMJ+lPdqWrHB9NcuGWg4WmItZU5wN6xfn48tRCuL3++cb8cGp+cOJstacOQiMe8DvNi5bTAVIMJUOiwMi81mwBdWmt+pAtrzGvtGRvgpenVW/Sdcw0r81dOZXnmyCxT83mubdAvh8YnJ87lVJmL+MzrMC8FjXvmI5UVH530zLOFZhOg0SUVYWW+vVKpd8uu0X4OTS+uWj44ny9RLDtGB1aG7p6xAZyDv3v8MFB/fXmk2maprcxzJZ3LIuIxr8M8Ws2SqNNmicWM0Q4vdm42AbptOIMZvHIqG+7+HO78gK16Jkb7WSiUqxuEIjOVi5w396+uzAG+9ORhtg9n1rwtKFp6WLsLNKzM1WYR8ZXXYR5dypCM1f80xgZSHR2D22wCNJWIMT6Y5tB0lsn5fNdWskQmRsMwrm21VE9MHKjtmYdhPpsrce2FjfvlEP4W05+Kr2qzhD1zVeYivvI6zIvlgETMiMXqh9f4YLrDpYnNr2/bOZLhe4dP4hxdb7NElXVtmFfPZakJ85H+JJsr1+PVO4+lVnim+VKY54plCuVAlbmIx7wO81Lg1tzk0snJic45cqVgzQlQCNeaP3d8HoDtI91ts5y/OQzzQ7VhvrB0lnmtqNWyVr88MtK38hhcnWUu4r+2w9zMEmb2a2Y2WfP4fjM7YGb7uze8tRVKQd1liZGxgXTb57MUy45y4NZss0C41jzSjd2fy/Wl4mwdSq9usyzUr8wBLts5wgXjA9UWzVrCCyqWeuY6y1zEf538600AjwHfjx4ws13AbufcNWb2FTO7xzn3XLcG2UixHDStzBcK5ZZvvYdwWSLUv2VouZ3LWis7utxmgbBvXq/NEo9Z3dD95HsuIVcsr9kvjwxnkitOfayemKieuYi32q7MnXM559wBYPm6uXcCd5vZlcBW4LoujW9NzcI8Op+lnaNwc4XWwjyqzFOJWLVf3U0To/0rTkIEmF4osrk/VTewM8k4m+q0X+qpbbPM6ZAtEe81DXMz+5CZ3bfspV4bZStQAD4F3AZsq30HM7vNzB43s8cnJ9e+TadVxbIjWeeQrcjSxqHWWy1r3f+5XFSNbx/OtFQNt2vXaD+vnMpWV+xA2DOvXWPeieGa24bUMxfxX9NSzDl3F3BXk3ebBO4APgoMAKvOcHXO3QncCbBv3776RwK2qZU2C7S3CzRqszSbAN1Zqcy7vSwxMjHaj3Pw8sls9eLm6cXCqjXmnRjOJJjLlwgCRyxmqsxFNoBurWa5FzjsnHsKuAV4tEvPu6ZiOWi4xhzCpYnQ3vks2WqbZe0vzdahDPGYdeVSinomKssTl986FFbmXQjzviTOwVw+DHH1zEX815Uwd84dBA6Y2WPAnHPu6W48bzPN2ixR8LXVMy+2VpnHY8bP79vFu167qqPUFdGqlOXLE+udy9KJ4Zot/XO5EvGY0Z9qbZJYRHpPx79XO+durnn9duD2dY+oDc3aLP2pOJlkrK2eeXQDT7qF1S//+ZbXtfy87doymCadiFVXtASBY2axWHeNebuqJydWeuWzla38p6P3LyJnhtebhpqFuZlV1pq3U5lHE6Bn90sTi9mK5YlzuRLlwHWpMg9/hp9aVpmrXy7iN8/D3NU9/na58cEUU220WfItToCeCWGYh8sTl85lWX9fe+nkxKWeufrlIn7zPMyDuicmLjc22N4u0GyL68zPhOVH4VZ3f56GNosqcxH/eR3m4Xb+tT+FsTaPwW11AvRMmBjtZz5fYmaxWD2XpSth
XjMBOptTZS7iO6/DvBQ0b7OMDaaZXiisuuihkWyLm4bOhOVH4TY6/rYTQ+kEZitXs+iWIRG/eR3m4QTo2m2W8cEUhXJQXVPdTFSZpxNn/0szsewo3Jk1DtlqVyxmDKUT1QsqZrPF6qSoiPjp7CfWOhRLAYmmlXl7u0BzpTLpRKzhGeln0q5lR+FOLxZIxWMMdGkteLSlPwgc8wVV5iK+8zrMC+W1zzOH9s9nyTW5//NM6kvF2TKU5qUTYWW+eSDZtbXgw5kkp7JF5vIlnNPxtyK+8/pfcLEckGq6miWszFu9Pi5XbH4xxZkUrTUfSCe6MvkZiU5OnMtpK7/IRuB1ZV5qsmkIls5nafX6uGyT+z/PtCjMZxa7cy5LJLqgIlprrp65iN96J7U6EJ7NsvanEFWzU3OtVua902aBMMxfOZXl2GyuK5OfkeHMyspcPXMRv3kb5s45CuWAZJOJylQixmA6seo2+kZypaDnwtw5ODyT7cq5LJHhvrBnPlu9Mk5hLuIzb8O8FITrxpu1WQAG0wnm8y2GeaHH2ixjS3d6drMyH+lLslgoV5c8ageoiN96J7XaVCpXwryF9eBDmUT1AoZmcqVyz02ARka7eD1dtHrl8Mnw7BfdMiTiN2/DvFAOd2q2Upm3E+bZHlqaCEtH4UJ3K/MovA/PhKcyqjIX8Zu3YV6shnnzddeDmWR1oq+ZXKm3wjwWM3ZVqvNurmYZqYZ5lr5kvKUfiiLSu7z9F1xstzJveTt/b02AwlKrpZvrzKPK/OWZrKpykQ3A3zAvtT4BOtxOz7zHJkBhKcy7us68snrl6GxO/XKRDaC3UqsNxaCNNks60VabpZcmQAH27dnM9uFMdTdrN0SbhMqBU2UusgF4+684arM0OwIXwg0xuWLQ9Jq5UjmgWHY912b5qdfv5Kdev7OrzzmyrBrXGnMR//lbmVfaLM1OTYSllRrzTVotuVJv3P95JvQl4yQqG65UmYv4z9vUKrSzmiUdhlWzvnkv3TJ0uplZtVeunrmI/7wN81KbbRaAuSa7QKP7P9PnQJjDUqtFlbmI/7wN82IbO0Cj3Y7NKvN86dypzGHp66KeuYj/PA7zsDJPtHAjULUybxLm2ULv3P95JlTbLKrMRbznbZi3s51/sFqZr91myVUq83NhAhSWKnL1zEX8521qVZcmtnjQFsB8k12g59IEKCyFuHrmIv7zNsyrpya2sTSxeZslqszPlTBXz1xko/A2zNtZmphOxEnFY8w2bbOcYz3zTFSZK8xFfOdtmLdz0BaE1XnTTUOFc6tnvnUojVl3z3wRkbPD22ZpsdR+mDfdNFQ6t9os7718JxdsGWDLUPpsD0VE1qmjEtTMPmtmD5rZXyx7bL+ZHTCz/d0bXmNL18Y1b7NAuKKl6WqWc2wCNJOMc+Xu0bM9DBHpgrbD3MwGgL93zl0PHDOz68xsF7DbOXcNcKOZXdTtgdZqZ2kiwFA62XQ1y7m2zlxENo62w9w5t+Cc+6fKq9NAHngncLeZXQlsBa6r/XNmdpuZPW5mj09OTq5nzEB755lD622WVDxGvIWNSCIivaRpEprZh8zsvmUv+yuPXwxc4Zz7NmGAF4BPAbcB22qfxzl3p3Nun3Nu35YtW9Y98GI5IGa0HLxDmWRLSxPT58jkp4hsLE0nQJ1zdwF3LX/MzHYCvw98uPLQJHAH8FFgAJjq7jBXa3Y2ea2hTKLp0sR8j93/KSLSqk7L0M8AH3fOzVZevxc47Jx7CrgFeLQbg1tLsexaOjExMpRJMJ8v4Zxr+D65YnDOTH6KyMbSyQToVcANwBfN7AEze59z7iBwwMweA+acc093eZyrFMtBSycmRoYyCZyDhcpa8nqyPXj/p4hIK9peZ17pke+o8/jtwO3dGFQriuWgpRMTI4Pp6OTEYvWyilq9eP+niEgrvC1DCx30zGHtq+PCCVCFuYj4x9swL5VdSycmRqIwn10jzHOlQBOgIuIlb8M8XM3SeptlqIUz
zfPFMn3qmYuIh7xNrrBn3k5l3vy2oWxRSxNFxE/ehnmh7NpezQJrX1CRK2oCVET85G2Yl8oBqbbaLEurWRoJlyYqzEXEP96Gebs7QPuTcczWbrPkSoG284uIl7xNrkLZkWgjzGMxYzDd+LCtIHAUStoBKiJ+8jbMi6X22iwQXpPWKMzPtYspRGRj8TfM22yzAJXKvH7PPFcMzzJXZS4iPvI2zEuBazvMo8O26skWz637P0VkY/E2uQql9ivztS6oyBXVZhERf3kb5u3uAAUYzCTXaLMozEXEX56HuSpzERHwOMxL5c565nMNeuaaABURn3kb5oVyQDLRXptlKJ2gUArIl1ZfUJEtaAJURPzlbXIVywHJNg7agrUP29I6cxHxmZdhXg4cgaOjNgvUv6BCbRYR8ZmXYV4sh8Hbdptljco8Wmeus1lExEdeJlcU5qkOdoBC/ZMT85UwV2UuIj7yNMwd0Hmbpd6KlqUJUIW5iPjH0zAPK/NEBwdtQeMJ0HjM2v4BISLSC7xMrkKp0jNvt82yxj2guaKOvxURf3kZ5qUgbLO02zNfutS5/gSo1piLiK+8TK/qapY2wzwZj5FJxuqenJjTZc4i4jEvwzxqs7TbMwcYTNc/bEthLiI+8zLMO12aCDCcSTDbYNOQ2iwi4isv06vTpYlQuaCibpiXNQEqIt7yMsxL1Z55+22WoQZnmmfVZhERj3kZ5oXqdv72hx/eA9qozaIwFxE/dRTmZvY5M/ummd2+7LFbzewxM/t094ZXX7XN0uapidD4HlBNgIqIzzqtzH/LOfd2YNTMLjCzfuC9zrmrgUEzu7F7Q1yt04O2IGqzNAjzDip9EZFe0FF6OeeOm1kfsA04BVwLfNXMJoDXANd3b4irdbrOHMJdoPP5EuXKxqNIrlimL6XKXET81DQNzexDZnbfspf9ldD+IfCSc+4EsJUw1P8A+DBhyNc+z21m9riZPT45ObmuQUdtlk6XJgKrWi2aABURnyWavYNz7i7grtrHK4F+h5m9BZgEPg38OyAPTNV5njuBOwH27dvnat/ejk4P2oJlF1TkS4z0JaOxaQJURLzWdmlroQnnnAPmgEHgUWAGuA+4BXikq6Ossa42Szo6OXFpeWK+sqNUm4ZExFedpNcI8Fkz+yYwDtzrnFsA/hT4DnAp8LXuDXG1Tk9NhPqHbeUqF1NkEqrMRcRPTdsstZxzJ4GfqfP4F4AvrH9IzXV6aiLUvwe0ev+nJkBFxFNe9hWKpfXtAAWYXdZmOZktALoyTkT85WeYV3rm8VjnE6DL2yxfevJl4jHjTXtHuzNAEZEzzMswL5QdqXgMs/WH+UK+xBe+/RLvvmw7523q6+o4RUTOFC/DvFQOOmqxQNhKiceM+XzYZvm7xw8xlyvxkbfu7eYQRUTOKC/DvFgOOjpkC8DMqodtlQPHXz7yIm+c2MQVE5u7PEoRkTPHyzAvlB2JDg7ZigxlwjC//9ljHDyxqKpcRLznZZgXywGpDtsssHTY1p8//GPO29THu1+7vYujExE589peZ94LSutoswAMpRN899BJpubz/MZPvoZEB+vVRUR6iZcpViy7jnZ/RoYyCabm8/Sn4tz6pokujkxE5OzwMswL5WDdYQ7w8/t2VQ/bEhHxmZdhXlzH0kSAkb4kZvBLb9nTvUGJiJxFXvbMi+uszH/xLXu59sIxdo8NdHFUIiJnj6dh7tZVme8dH2DvuIJcRDYOj9ssXg5dROS08DIRFeYiIit5mYjF0vraLCIiG42fYR6oMhcRWc7LRAy383s5dBGR08LLRAzbLF4OXUTktPAyEYvlgIR65iIiVV6G+Xq384uIbDReJmKp7Eit49REEZGNxstEXO/ZLCIiG413YR4EjlKwvpuGREQ2Gu8SsRgEAGqziIgs410ilsoOQG0WEZFlvAvzYjmszLWaRURkiXeJWFCYi4is4l0iFtVmERFZxb8wL6kyFxGp1XEimtnlZva1Za/vN7MDZra/
O0OrrxQozEVEanWUiGYWAz4CJCuv7wJ2O+euAW40s4u6N8SVCqWozaIwFxGJdJqIHwP+ctnr7wTuNrMrga3AdesdWCNLq1nUMxcRiTS90NnMPgR8aNlD/xfIOOf+2awaqFuB7wKfAm4D3l3neW6rvI2JiYmOB6yliSIiqzVNROfcXc65m6MX4BTwDjN7ALjSzH4ZmATuAD4JpIGpOs9zp3Nun3Nu35YtWzoe8NJqFoW5iEik7UR0zt3hnLvWOXcD8IRz7nPAvcBh59xTwC3Ao90d5pKoMk8l1GYREYl0pbx1zh0EDpjZY8Ccc+7pbjxvPVGY66AtEZElTXvma6m0XaL/vx24fd0jakI9cxGR1bxLxEKlZ642i4jIEu/CvKTKXERkFe8SUW0WEZHVvEvEqM2S0KYhEZEq78I8OmgrpcpcRKTKu0TUQVsiIqt5l4jaASoispp3iVgo6aAtEZFa3oV5sRyQiBnLDvkSETnneRnmarGIiKzkXSoWy04tFhGRGh6GeUAq4d2wRUROK+9SMeyZezdsEZHTyrtULJYdSR2yJSKygodhrglQEZFa3qVisRxoK7+ISA3vUrFYdjpkS0SkhodhrjaLiEgt71JRYS4ispp3qVgsO/XMRURqeJeKYWWunrmIyHLehXmhFJBQZS4isoJ3qailiSIiq3mXiqVAB22JiNTyLsyLJa1mERGp5V0qFspOPXMRkRrepWLYM1ebRURkOe/CvKRNQyIiq3iXiuERuN4NW0TktPIqFZ1zFFSZi4is4lUqlgIHQDKmnrmIyHIdhbmZHTWzByovV1Ye229mB8xsf3eHuKRYDgDUZhERqdFpKn7dOXdD5eUJM9sF7HbOXQPcaGYXdXGMVcVypTJXm0VEZIVOU/FtZvaQmX3GzAx4J3B3pUrfClzXtREuE1XmWpooIrJS0zA3sw+Z2X3LXvYDn3DOXQckgJ8kDPAC8CngNmBbnee5zcweN7PHJycnOxpsFObaNCQislKi2Ts45+4C7mrw5q8CbwAmgTuAjwIDwFSd57kTuBNg3759rpPBFktqs4iI1NN2KprZqJndWHl1H/AccC9w2Dn3FHAL8Gj3hrikGFQmQNVmERFZoZMSdxb4gJk9CFwC/B/n3EHggJk9Bsw5557u5iAjSz1zVeYiIss1bbPUcs6VCNsptY/fDtzejUE1ojaLiEh9XqXiYCbBe163g+0jmbM9FBGRntJ2ZX427R0f4I5fuOJsD0NEpOd4VZmLiEh9CnMRkQ1AYS4isgEozEVENgCFuYjIBqAwFxHZABTmIiIbgMJcRGQDMOc6OsBwfR/UbBI4uI6nGKfOyYw9TOM9vTTe00vjPb3aGe9u59yWem84K2G+Xmb2uHNu39keR6s03tNL4z29NN7Tq1vjVZtFRGQDUJiLiGwAvob5nWd7AG3SeE8vjff00nhPr66M18ueuYiIrORrZS4iIssozEVENgDvwtzM9pvZATPbf7bHUo+ZJczs1ypr6aPHen3MnzWzB83sLyqv9/R4Aczsc2b2TTO7vfL6rWb2mJl9+myPrR4zu9zMvlb5fx++vkfN7IHKy5W9PmYz+2Dle/geM+vr5fGa2cSyr+0hM/uZbozXqzA3s12Ei+avAW40s4vO9pjqSACPAd+H3h+zmQ0Af++cux44ZmbX0cPjXea3nHNvB0bN7FLgvc65q4FBM7vxLI9tBTOLAR8Bkr3+/bDM151zNzjnbgCO08NjNrMM8F7g7cAthJtwena8zrmXln1tnwKeoAvj9SrMgXcCd5vZlcBW4LqzPJ5VnHM559wBIJpZ7ukxO+cWnHP/VHl1GngzPTzeiHPuuJn1AduAS4CvmtkE8Brg+rM6uNU+Bvxl5f97+vthmbeZ2UNm9hl6f8xXA1ngH4FP0fvjBcDMLgBepkvj9S3MtwIFwr+w2wj/Ifc6L8ZsZhcDVxB+T/gw3gngh8BLQAo4BfwB8GF6aMxmth04zzn3z5WHvPh+AD7hnLuO
8DfN7fT2mHcAA8C7gL348zW+FbibLo3XtzCfBO4APgmk8eP8hZ4fs5ntBH4f+DgejBfCX1WBCcLfgF4F/EfgT4A8vTXmnwXeYWYPAFcC8/jx9b2n8r9fBYr09pgXgAedcwHwIBDQ2+ONvBN4gC79m/MtzO8FDjvnniLsjT16lsfTCh/G/Bng4865WTwYr4UmXLhJYg74LjAD3Ec45kfO4vBWcM7d4Zy7ttIffQK4h97/+o4um3fYBxyjt8f8BGEwArwWOEBvjzdqsRx1zpXo0r85r8LcOXcQOGBmjwFzzrmnz/aYmun1MZvZVcANwBcr1ePl9PB4K0aAz5rZNwknu74M/CnwHeBS4GtncWxr6vXvh4pZ4ANm9iDhfMTn6eExO+deAb5jZo8ASefcg/TweCt+mvD7tmvfE9oBKiKyAXhVmYuISH0KcxGRDUBhLiKyASjMRUQ2AIW5iMgGoDAXEdkAFOYiIhvA/wcvthzl6qMzpgAAAABJRU5ErkJggg==",
+ "text/plain": [
+ "<Figure size 432x288 with 1 Axes>"
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "scores = policy.get_score(mode=\"EI\", xs=X)\n",
+ "plt.plot(scores)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Parallelization\n",
+ "\n",
+ "PHYSBO can calculate acquisition functions for candidates in parallel by using MPI via `mpi4py`.\n",
+ "To enable MPI parallelization, pass an MPI communicator such as `MPI.COMM_WORLD` to a keyword argument, `comm`, of the constructor of the `policy`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-03-05T04:50:34.224416Z",
+ "start_time": "2021-03-05T04:50:34.222375Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "# from mpi4py import MPI\n",
+ "# policy = physbo.search.discrete.policy(test_X=X, comm=MPI.COMM_WORLD)"
+ ]
+ }
+ ],
+ "metadata": {
+ "anaconda-cloud": {},
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.1"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/manual/v2.0.2/en/_sources/notebook/tutorial_basic_org.ipynb.txt b/manual/v2.0.2/en/_sources/notebook/tutorial_basic_org.ipynb.txt
new file mode 100644
index 00000000..26238c42
--- /dev/null
+++ b/manual/v2.0.2/en/_sources/notebook/tutorial_basic_org.ipynb.txt
@@ -0,0 +1,565 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Basic usage of PHYSBO\n",
+ "\n",
+ "## Introduction\n",
+ "\n",
+ "In this tutorial, the problem of finding a stable interface structure for Cu is solved as an example. The values that have already been evaluated are used, although the evaluation of the objective function, i.e., the structural relaxation calculation, actually takes on the order of several hours per calculation. For more information on the problem setup, please refer to the following references\n",
+ "\n",
+ "- S. Kiyohara, H. Oda, K. Tsuda and T. Mizoguchi, “Acceleration of stable interface structure searching using a kriging approach”, Jpn. J. Appl. Phys. 55, 045502 (2016).\n",
+ "\n",
+ "---\n",
+ "\n",
+ "Let's try each step using the sample data.\n",
+ "\n",
+ "First, we will import PHYSBO."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:02:05.943971Z",
+ "start_time": "2020-12-04T06:02:05.507138Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "import physbo"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Preparation of search candidate data\n",
+ "\n",
+ "First, load the data."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:02:05.950047Z",
+ "start_time": "2020-12-04T06:02:05.945622Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "\n",
+ "def load_data():\n",
+ " A = np.asarray(np.loadtxt('data/s5-210.csv',skiprows=1, delimiter=',') )\n",
+ " X = A[:,0:3]\n",
+ " t = -A[:,3]\n",
+ " return X, t"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:02:06.105894Z",
+ "start_time": "2020-12-04T06:02:05.961463Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "X, t = load_data()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In the following, N is defined as the number of search candidates and d is defined as the dimensionality of the input parameters.\n",
+ "\n",
+ "X is an N x d matrix, where each row represents a parameter set (a d-dimensional vector) for each candidate. \n",
+ "t is a vector of N dimensions, corresponding to the negative energy of each candidate (the value of the objective function to be optimized).\n",
+ "Normally, when we perform Bayesian optimization, we start with only X given and t does not exist. Therefore, in actual use, the value of t can only be obtained by receiving a candidate Bayesian optimization proposal and evaluating it with a simulator. Since this is a tutorial, we will skip the calculations and give t in advance.\n",
+ "\n",
+ "**PHYSBO assumes that the direction of optimization is \"maximization\".** \n",
+ "\n",
+ "Therefore, the original problem setting is \"energy minimization\", but when optimizing with PHYSBO, the objective function value is multiplied by a negative value and treated as a \"negative energy maximization\" problem."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:02:06.115603Z",
+ "start_time": "2020-12-04T06:02:06.107365Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([[0. , 1. , 0. ],\n",
+ " [0. , 1. , 0.1],\n",
+ " [0. , 1. , 0.2],\n",
+ " ...,\n",
+ " [8. , 1.5, 3.4],\n",
+ " [8. , 1.5, 3.5],\n",
+ " [8. , 1.5, 3.6]])"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "X"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:02:06.120704Z",
+ "start_time": "2020-12-04T06:02:06.116918Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([-1.01301176, -1.01487066, -1.02044168, ..., -1.11680203,\n",
+ " -2.48876352, -2.4971452 ])"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "t"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To scale the search parameters, standardize each column of X so that the mean is 0 and the variance is 1, respectively."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:02:06.127129Z",
+ "start_time": "2020-12-04T06:02:06.121967Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "X = physbo.misc.centering( X )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:02:06.132603Z",
+ "start_time": "2020-12-04T06:02:06.129255Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([[-1.71079785, -1.46385011, -1.68585446],\n",
+ " [-1.71079785, -1.46385011, -1.59219588],\n",
+ " [-1.71079785, -1.46385011, -1.4985373 ],\n",
+ " ...,\n",
+ " [ 1.71079785, 1.46385011, 1.4985373 ],\n",
+ " [ 1.71079785, 1.46385011, 1.59219588],\n",
+ " [ 1.71079785, 1.46385011, 1.68585446]])"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "X"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Definition of simulator\n",
+ "\n",
+ "Define the simulator class called in PHYSBO. \n",
+ "The return value of the `__call__` method is the value of the objective function when action is given. \n",
+ "action is the ID of the search candidate (0, 1, ..., N-1).\n",
+ "\n",
+ "In this tutorial, we have defined a simulator that only returns the already computed value of t when action is given. \n",
+ "Please customize the simulator class if you want to apply it to other problems."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:02:06.136850Z",
+ "start_time": "2020-12-04T06:02:06.134076Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "class simulator:\n",
+ " def __init__( self ):\n",
+ " _, self.t = load_data()\n",
+ " \n",
+ " def __call__( self, action ):\n",
+ " return self.t[action]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Performing optimization"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Setting policy\n",
+ "\n",
+ "First, set the optimization `policy`. \n",
+ "\n",
+ "Next, set `test_X` to the matrix of search candidates (`numpy.array`)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:02:06.149593Z",
+ "start_time": "2020-12-04T06:02:06.143075Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "# set policy \n",
+ "policy = physbo.search.discrete.policy(test_X=X)\n",
+ "\n",
+ "# set seed\n",
+ "policy.set_seed(0)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "When `policy` is set, no optimization is done yet.\n",
+ "Execute the following methods on `policy` to optimize it.\n",
+ "\n",
+ "- `random_search` \n",
+ "- `bayes_search`\n",
+ "\n",
+ "If you specify the `simulator` and the number of search steps in these methods, the following loop will be executed for the number of search steps.\n",
+ "\n",
+ "i) Select the next parameter to be executed from the candidate parameters.\n",
+ "\n",
+ "ii) Execute `simulator` with the selected parameters.\n",
+ "\n",
+ "The default number of parameters returned by i) is one, but it is possible to return multiple parameters in one step.\n",
+ "See the section \"Searching for multiple candidates at once\" for details. \n",
+ "\n",
+ "Also, instead of running the above loop inside PHYSBO, it is possible to control i) and ii) separately from the outside. In other words, it is possible to have PHYSBO propose the next parameter to be executed, evaluate its objective function value in some way outside PHYSBO (e.g., by experiment rather than numerical calculation), and register the evaluated value in PHYSBO. For more details, please refer to the \"Running Interactively\" section of the tutorial.\n",
+ "\n",
+ "### Random Search\n",
+ "\n",
+ "First of all, let's perform a random search.\n",
+ "\n",
+ "Since Bayesian optimization requires at least two objective function values to be obtained (the initial number of data required depends on the problem to be optimized and the dimension d of the parameters), we will first perform a random search. \n",
+ "\n",
+ "**argument**. \n",
+ "\n",
+ "- `max_num_probes`: Number of search steps. \n",
+ "- `simulator`: The simulator of the objective function (an object of class simulator). "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:02:06.380266Z",
+ "start_time": "2020-12-04T06:02:06.154735Z"
+ },
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "res = policy.random_search(max_num_probes=20, simulator=simulator())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "When executed, the objective function value and its action ID for each step, and the best value up to now and its action ID will be printed as follows.\n",
+ "\n",
+ "````\n",
+ "0020-th step: f(x) = -1.048733 (action=1022)\n",
+ " current best f(x) = -0.963795 (best action=5734) \n",
+ "````\n",
+ "\n",
+ "\n",
+ "### Bayesian Optimization\n",
+ "\n",
+ "Next, we run the Bayesian optimization as follows.\n",
+ "\n",
+ "**argument**. \n",
+ "\n",
+ "- `max_num_probes`: Number of search steps. \n",
+ "- `simulator`: The simulator of the objective function (an object of class simulator). \n",
+ "- `score`: The type of acquisition function. You can specify one of the following\n",
+ " - TS (Thompson Sampling) \n",
+ " - EI (Expected Improvement) \n",
+ " - PI (Probability of Improvement) \n",
+ "- `interval`: \n",
+ "The hyperparameters are trained at the specified interval. \n",
+ "If a negative value is specified, no hyperparameter will be learned. \n",
+ "0 means that hyperparameter learning will be performed only in the first step. \n",
+ "- `num_rand_basis`: Number of basis functions. 0 means that a normal Gaussian process without Bayesian linear model will be used. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:03:19.374318Z",
+ "start_time": "2020-12-04T06:02:06.382690Z"
+ },
+ "code_folding": [],
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "res = policy.bayes_search(max_num_probes=80, simulator=simulator(), score='TS', \n",
+ " interval=20, num_rand_basis=5000)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Checking the results\n",
+ "\n",
+ "The search result ``res`` is returned as an object of the ``history`` class (`physbo.search.discrete.results.history`). \n",
+ "The following is a reference to the search results.\n",
+ "\n",
+ "- `res.fx` : The history of evaluated values of simulator (objective function).\n",
+ "- `res.chosen_actions`: The history of action IDs (parameters) when the simulator was evaluated. \n",
+ "- `fbest, best_action= res.export_all_sequence_best_fx()`: The history of best values and their action IDs (parameters) for all timings when the simulator was evaluated.\n",
+ "- `res.total_num_search`: Total number of simulator evaluations.\n",
+ "\n",
+ "Let's plot the objective function value and the best value at each step. \n",
+ "`res.fx` and `best_fx` should range up to `res.total_num_search`, respectively."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:03:19.620065Z",
+ "start_time": "2020-12-04T06:03:19.375626Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "%matplotlib inline"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:03:19.774131Z",
+ "start_time": "2020-12-04T06:03:19.621947Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[]"
+ ]
+ },
+ "execution_count": 13,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX0AAAD3CAYAAADxJYRbAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/Il7ecAAAACXBIWXMAAAsTAAALEwEAmpwYAABCdElEQVR4nO2deXgc1ZX231PVrX21Fku2Zcv7vgsbDF4wYIITSAJZIYFAiAlZHSczX74szGSyswTCfDCEZEjiSYAQmLBvtrENAWwj492WbdnyrtWyrMVaernfH9VVXd1dvUhqdftWn9/z6IEuVavq+lade+57zz2HhBBgGIZhUgMl2TfAMAzDJA42+gzDMCkEG32GYZgUgo0+wzBMCsFGn2EYJoVwJPsGIlFcXCwqKyuTfRsMwzBSsX379hYhRInV7y5qo19ZWYnq6upk3wbDMIxUENHxcL9jeYdhGCaFYKPPMAyTQrDRZxiGSSHY6DMMw6QQbPQZhmFSCDb6DMMwKQQbfYZhmBSCjT6T0qzf34iTrReSfRsMkzAu6s1ZFzP/8/4xnG7rwfdWTIJDTZ2xs+F8D8ryM5J9G3HhdFs37lxbjVGFmXjh65ejKCd9yK/p9nht+7w0nO+By+PFiIJMqArhfLcL6/Y34rU99Wjq6IVTJThUBTfNG4nPXjI62bc7YM53u9DU3oOJw3NjOv9MWzceXHcI31g+AWOKsof47qJjz6cvATy2+Sge23wE33xqB3rdnpi/J4TAf206gmt+sxnnuvqG8A7jzzuHm3HpLzfg/SNnQ37X2tUHj1eugjyv720AADS19+Krf9mOPrd3SK/3wbFWzPz3N7F+f+OQXicZvLz7DBbf+xYW37sRU3/8OpY/sAlVP1uH7/19F2oaOlCck4bsdAeaO3rx4+f34XRbd9LuVQiBbzz5IZ7cemJA3//x83vxqcfehzeG510IgR8/vxd/334KN/9+a1LbrdNvo09EDiL6NhE1x3DuGiLaQkRrIh1LFJ29bnzjyQ8HPZ1v7ujF6bZuzB1dgNf2NuDOP1fjQp876vdcHi++/9we/Pr1Ghxu6sS2Y60Bv+/qdeMnL+3D2c7eQd3fUKG/JK/sORNw/GTrBVz6yw346MPvYMOBRiSyGpvHK7DjxDm0DODf7LU99Zhanof7Pj0LHxw7hx89vyfivfe4PHh62wm8VTMwo7316Fl0uzz4+pMfojqo72Xmya0n8M2ndmBORQF+deNM3H5FJSaV5uJLiyrx/Ncvxz//z5X44+0L8D9fXoi/3LkQAPDIxtqk3e/++na8vLse//biXuw/096v77Z09uK1vfU43+1CfXtP1PPX7W/EhpomfO6SCrT3uHDL77egKYbvDSUD8fQdALYC2B3pJCKqADBGCHEpgOVENMHq2ACuP2C2Hz+Hl3fX48ltAxvhdXaebAMA/GDlVNx70yy8W9uC7/xtZ8TvnDp3AV/64zb8rfokvrp0PJwqYceJtoBzNtQ04Y/vHsOf3w+bNiNptHb1Yf2BRhAB6/c3BXg5L+46gz63F119bnz5z9X4zO/ex4mzidHJH95wGJ989D1U/Ww95v90HW57YhvaLkSfQTWc70H18XNYOaMMH58zEt9cPgHPVJ/CM9UnQ8690OfGH945iiX3bsT3/3cP7nvj0IDu9UBDB4bnpWNkQSbu+NMHONjQMaC/M1jcHi8ON3bgYIP209QxcCP0u81H8IN/7MHSSSVYe8dCfG7BaPzf66bisS/Oxw8/Og1zKgpARMb5Iwsy8bkFFXjmg5MBztf7R87iL1uOJ8RheGlXPVSFUJCVhtV/24Eel3+m3tXrDvgczN+rT8Hl0e7xcGPk/rvQ58ZPXtqPycNz8dNPzMCf71iA5o5e3PKHrWjvccWnMQOg30ZfCNEjhNgCIFrvrADwNBHNB1AKYHGYYwmjtqkTgDatH8zDtetkG1SFMGNEPj5zSQW+eOkYbDrYDLcnUB7ocXnw
p3frcNN/vYcrfr0RW4+24t5PzcL3r5uCaSPysePEuYDzP6jTvL+/V59MuFQihIDLE17eeH7Habg8AquWjENDew/2nD5v/O6lXWcwb3QB3vruMvzsEzNQU9+Be17cO+T3XNvUgUc31eLqqaX48cem4dLxRdh8qBlbjkb3ot/Yp0k7180sBwB85+pJmDQ8By/tqg8591tP7cTPXjmA8SU5mD4iL+K/UyRq6tsxe1QB/nzHAmSmqbj1ia1DOqt7eMNhfOmP20KO/+dbtbjmwbdx7UPazxW/3hjTTDWYc119+NXrNbh2+nA8/sUqZKapMX3va8smQFEI//nWYQDa8/PF/96KHz2/F/e+cTDmd9PjFXj7UDO+9dQOLPzFevzi1QM4fyGyMRVC4OXdZ3D5hGI88OnZONTYiV+/XoMLfW78dv1hXPLz9Vj0q7fw3PZTIffh9Qo8te0EppRpWr5uT8Lx2w2HcbqtGz/75Aw4VQXzRhfi4c/PxeGmTrx9KKpQMmRENfpEdCsRrTf9xCrLlALoA3APgFUAhoc5Fny9VURUTUTVzc3x/YfRO6mupQsHo4zSkdh5sg1TynKNh3zWqAL0ur04drYr4LxHN9bi31/aj65eN/7l2snY+L1l+ExVBQBgbkUBdp86HzBQbKtrRW66A/Xne/D24cQ+FH967xgu++UGS4MmhMAz1Scxe1Q+7l46HqpCeHO/ZjQPNXagpqEDN8weAaeq4AuXjsHXrpyATQebsf34uZC/FY13a1vwjx2nop7n9Qr84H/3IivNgV/dNAtfvmIs7vvULBAhJg/61T31mDQ8BxNKcwAAikJYMHYYdp5sCxhwe90evHO4GbdeNgZPrboUE0pzQgb3WOhxeVDX0oUp5XmoGJaFx74wH43tvXhjX+xS0S9ePWCsQ8TChgON2HSwGQfq/RKG1yvw7PZTmD+mEI/eMg+fX1CBPrcXnb39N/pHWzohBPDZSyqQ5ojdfyzLz8AXFo7Bcx+exm/ePIhvPb0D80YX4rNVFfivTUfw0PrDUf9GbVMHlty7Ebc+sQ2bDzVjclkefv/OUSy9fyOe+GddWKdp16nzOHWuG9fPKseSSSX40qJK/PHdY1hy70Y8uP4QlkwsQWVRFr779124+fdbcbTZb9j/WduCE60XcPey8SjOSYto9I82d+K/36nDp+ePwiWVw4zj80YXAtDWkSK3r3PIHL+oPSWEWCuEuNr085sY/3YzgEcA/BBAOoCWMMeCr/e4EKJKCFFVUmKZDnrAHGnqxLjibBABr+2J/eUx4/UK7DrZhjkVBcaxaSPyAAD7gvTBrXWtmD0qH6+vXoKvXzkBFcOyjN/NHV2AbpfHGHzOdfXhYGMH7rhiLIqy0/D0ICWo/uDyePH420fR0tmH5o7Qh3Hv6XbUNHTg01UVKMhKw4LKYVjnW4x8cecZKAR8dNYI4/zbFo1BUXYaHlrfPxmkx+XB6r/txD0v7Iu6SPZM9UlsO9aKH6ycgmJf1E1WmgOjh2XhYGNknba5oxfbjrXiuhnlAcfnjS5EZ68bh5v8g8be0+3odXuxaHwRAEBVCO4BvIyHGjvgFcBUn5c4p6IAw/PS8d6RkFfAkh6XB4+/fRTfenoHdp9qi3q+xyuMZ+u57f5BdGtdK063dePWy8Zg5cxyzB5VAABwe/rfproWTZ4ZW5zT7+9+ddk4OFXCw2/VYumkEvz5jgX45Y0z8en5o/DbDYejav6/eu0gOnpceOTmedj2w6uw9o4FePmbV2D6iDz8x8v78eA662fvpV1nkKYqWDG9DADw/eumYHZFAcYUZePZr16Gx744H89+dRF+/skZ2HvmPK7/z39iwwHtWX9y6wkMy07DR2aUYXxJDg5HMPo7TrTB7RW4a+n4gOMFWU44VUKTxXum03C+Bzf913v46cv7I/4bDJShjN5ZB+CUEGIvgBsBvB/mWMI40tyJBWOH4ZIxw/rlMZk52tKJjl43ZpuM/oTSHKSpCvabPCqP
V2Dv6fMBg4MZfcTXdf0PfAt7V0wsxk3zR2HDgSZLrbXH5cEtf9iCh9Yfipv++ea+RtSf167VaLHI9Ez1SaQ7FFw/WzPsK6YPx6HGTtS1dOHFXWewaHwxSnL94Y5ZaQ7cvWw83jncgm11sS9Y/nXrCTR39KKjx43a5vAvVFNHD37x6gEsGDvMmDnpTB6ei5oonv4b+xogBLByZqDRnz9G65MPj7cZx/QF1/ljNG/NqSgDMpA19do9TSnXHAQiwqLxxXj/yNmYokCO+9ZIPF6Bu/5nu+XgbOZE6wX0uLzIcCp4fudpYwb3jx2nkJ2mYsU0zeipiqa3D8zod0JVCKMKM/v93dLcDNzzsem44/KxhjSkKIRf3TQL188egfvfPBhWM99z6jzWH2jEnYvH4aOzypHu0Gbc00fk4y9fXohPzx+FRzfVhsw0vV6BV3bXY8mkYuRnOgEAGU4VL3z9cjx39yJU+TxyRSHcsnAM3vzOEowtycada6tx3xs1WHegEZ+aPwrpDhUTh+fgcGNH2HewyyeXFWQ5A44TEUpy0sOuo3i9At/9+070ub249bIxMf5r9o+4GX0iKiCih/TPQojjALYQ0VYAHUKIfVbH4nX9aJzr6sPZrj5MKM3BdTPLcLCxI2DqFiu6kZ5rMuZOVcGkspyASIAjzZ3o6vNg1qgCWDGqMBPFOWkBRj/NoWDWqHx8pqoCbq/Ac9tPh3zvmeqTeLf2LB5afxir/7azX+Gi4fjTe3VI903Pgz2QHpcHL+w8jetmlBkvyjXTNFXu/jcO4kTrBdwwewSCuWXhGJTkpuM36w7GdA89Lg8e23wE44q1OOZw0lBtUyc+89j76HF78YtPzgxYJASAKWW5ONbSFXEx7rW99RhXko1JwwM91NHDslCUnRZw7Q+OncO44mxjUFNVgtsbXt45de4CfvnaAXzrqR0BMtD++nZkOlWMMc32Fo0vwlnfDC8adS2adPiLT87AuQt9+PqTH0ZcWzjYoD2LX1k8Di2dfXj7UDN6XB68tqcBH5lRbkiTTt+egUhtCsexlgsYPSzL+Bv95eaFo3HP9dMCpCFVIfzHDdOR5VTDyjwPrj+E/Ewnbr+8MuR3RIR7rp+GEQWZ+O4zO9Flkq22nziHhvYew3mJRnl+Jv5+1yKsnFGORzYegccr8PkF2v6CiaW5aO9xoznMmowul+Wkh26FKsnLCDtoP/FuHd6tPYsff2waxpX0fwYVCwM2+kKIq4M+twkhVgcdu18IsVAI8bNIxxKB7jmOL83BR2ZoXs5rPm+/tqkTN/9+C96tjT7V3nWqDbnpDowP6pBp5XnYf6bdGPl3+SJ8Zofx9IkIcyoKseOkZmC21bViTkUB0h0qJpTmYEHlMPztgxMBnkSPy4NHNx5B1ZhC/OtHJuOFnWfwhT9sHVS8/97T5/HBsXP4ku8FCg4ne7e2Be09btw0f5RxbFRhFqaPyMMre+qRpiq41vfvaSYzTcXXlo3HlqOtxvQ4En/ZchzNHb34xY0zUZjlxIcWRn/zoWZ88tF30dnrxlNfWWjo8WYml+XBK8Ivsl3oc2PL0VZcO70sZMAgIswdXWgssHu9AtuPt6KqstA4xxlG3jl+tgtf/Z/tWHLvRvxu81G8uOsMqk1tqGlox+SyXCiK/5qLJhQDAN6z2PcQjG70V84sx69vmoVtda24/83wA2pNQweIgK8sGYdh2Wl47sNTWLe/ER29btw4b6RxnkP1efoDkKyOtnShsigr+on9pDA7DXdcMRav7KkPWI8AtPW0t2qasGrJOORmOC2/n5vhxAOfno3jrRfw81cPGMdf2nUG6Q4FV00NWUoMS2aaiv/8/Fx8/7op+ObyCRjrc0r0Z6+20fo56+p1Q1XIcKbMlOamWxr9/Wface/rB3HNtOH4/IKKkN/Hi5TZnKUbgQklOSjPz8ScigK8vrcBb/sMyXtHzuL37xyN+nd2nmzDrIr8gJcX0Iz+2a4+w1PWBwfdc7Vi7ugC
HG3uwum2buw9046FY/0LPp+9pALHzl4IkKGeqT6JhvYefOeaSfjasgl4+PNzsfNkW8SXP5iXd5/Bd/62E8d9i85/eu8YstJU3L10PBQK9fSP+WSF6SPyA47r3v7SySXGDCCYzy8YjcqiLNy5thr3vLA3bJhad58Hj20+ikXji3DpuCLMHV2ID4Mim9btb8Ttf9yGkQWZeP7rlxtySzCTfZp5OIlH30QWzljNG1OAoy1dONfVhyPNnTh3wWVM+wHAoVrLO09uO4E39zfgq0vHY/2aJUhzKEaEkBACNQ0dmFoeuINzZEEmKouy8F4Mzsaxli4U56QjN8OJj88Zic9UjcIT/6zDsZYuy/MPNnSgsigbeRlO3DB7BNbvb8La94+hLC8Dl44r8rfH9xxHmjU0tffg44+8iyOmmbEQAsfPdg1Iz4+FO68Yh9x0R8i60IPrDqEwy4nbFlVG/P7CcUX4yuJxeHLrCVz9m8246oFN+NsHJ3HV1FJL7zsSikL46tLx+O6KycaxiT6jH07X7+r1ICtNDXEsAKAkNz3kPfN6BdY8sxP5WU786sbQGWw8SSmjn+FUMLJA0x9XzizDntPncfufPsDIgkx8Ys4IvFvbEjHkq8flQU19h6VOP81nFHWJZ/ep85g5KnRwMDN3tPZ39GiDBSaj/9FZ5Zg+Ig/f/ttObDzYZHj5CyqHGYuKN8wegSsmFOP9o9E9RZ2nt53EP3acxjUPvo17X6/Bi7vO4KZ5o1CQlYbinPSQqIL6tm5kOBUUBmmTH51ZDodCIZq6mQynihe/eQVuu6wSf9lyHFc9sNlStvnr1uNo6ezF6qsnAQDmjS7AkeaugHj7//7nUYweloXn7l6EUYXhvcvKoiykORRD3gimvVubdocbqIy1lpPn8MEx7V7N0RcOxVre6XV5kZ3uwL9+ZAomlOZiycRivLlP26jW0N6DtgsuTPXp+WYWTSjG1rrWqBFBdS1dGFvsb/f3rp0Mp6rgV6/VWJ5f09CByb40AZ+aPwp9Hi8+OHYOn5g70tDxtfZoJiBSpMire+qx62RbwIytqaMXF/o8AfcUT/KznPjy4rF4Y18j9p4+j44eFx7bfASbDzVj1ZLxMRnu766YhDsuH4tJw3MwpSwP104vw9eWxWdrUEluOvIyHGFnlJ297rD3WJqbjtauvoAd4I0dPahp6MDXlo0f8nQgtjT6mw424cr7N6HVJHvUNnViXHGOYYSvm1GONIeCKyeX4tm7F+H2y8fC5RFGKKLOz1/Zj0c21hoLs26vMCIezOhe3P76dvS6PThQ3x5Wz9eZNaoACgFPbTsBVSHD4ACawfzrnQsxsTQHd63djn99djca2nuw+uqJAV7AJWOH4WhzV8w7UmsaOnDVlFKsmDYcj246gj63F7ct0haMhudloDFogan+fA9G5GeGeB4Th+di+4+uMTz+cORlOPHvN0zHC1+/Aj19HjxlEZW0+VAzppbnGYOe3/C2AdAWl7fWteLjc0YiO8rL7lAVTCzNCevp67ONvDDSwOxRBVAVwvbj51B9rBXFOWkBswKHSpaevtvrNbxmAFgxrQyn27qx70y7fxG3zMLojy9CZ68bu037HqyoO9tlSAuAthB699LxeH1fA7YGDfrdfR4cO9tlzHqmj8gzYsvN0o7eHgDGhiMrNtQ0AQB2nfTf49FmbYZRGWEmO1juuGIs8jOd+MaTH+KyX76FX71Wg8vGFcW8wJnuUHHP9dPw6C3z8cgt8/Dw5+dixsj86F+MASLChNKcgEgvMxf63GGf1dJcLXfV2S7/O3v6nJaeYewQ/nvq2NLoF+eko66lC2/u8xvwI82dARpwxbAsbPvBVfj9rfORk+7ArFH5GFmQiVf3+DfnbDl6Fr9/pw73vXEQX/rjNrzle/jn+Dx0M7kZTowpysL+M+04UN8Bl0dgTkXkBywn3YFJw3Nxoc+DGSPzQx6Sgqw0/PXOhZhclosXd53BgsphuGx8UcA5uiQUy7b+
s529aOnsxWXji/D/bp6Hv965EA98ejYmlGoGoTQ31NM/c74b5QXWCdbys6wNpxUzR+VjWE6apUfb5/YiL8Pf9tkV2mCo6/qv7qmHEMD1s8tDvmvF5LLcsLH67d0+ox/G089MUzGtPA8fHm/DB8dbcUnlsIABz6EocHtFSNSG2yMCEqldNbUUCgFv7mvAAd+sQzfCZi7zSS2RJJ6OHheaO3pDDOydi8ehPD8DP3/1QEAE0OGmDggBw9ATEdZcMwm3X16JSUFJwnRPP9xMo7PXja2+zW76TnQAxp6UoTRSeRlOfOPKCTh1rhtXTS3FC1+/HE+tujTqwJ8oJpbmRvD0PWHvUw8KML9rp3xGP9IsNl7Y0uhPH5GHMUVZeNWnh3f3eXC6rTtk4a8gK814oYkIK2eW4Z+1LTjvMwy/XX8YJbnp+OnHp2NrXSse3XQEIwsyjZE6mGnledhf324s4kbz9AFgrs+rXWBaLAy+x798eSFuWTgaP/n49BCPe+bIAqQ7FGyri74RSjeEuvG5fEJxwAJtaV5GSChZfVsPyvP7H5JnhUMhuCxkBI9XGB4nAGSnOzClLM/Q9V/adQZTy/OMwSkaU8py0dTRa7nA3d6jyTvhPH1Ak5e2Hz+Hk63dAXq+3gb9ns24vQJOk6dflJOOSyqH4Y19jThQ34GRBZmWklJRTjqmludFXMzVwzWD14cy01T8y7WTsfvUebywyx/pVRPUzwCwYnoZ/u366SF/W/93Dyfv/PNwC/o8Xlw9tRSn27qNBci6li6kORSMiNOzEY47F4/F3p9ci99+bm7YoIhkMXF4Dlo6+yyfs65eN3LSrXcol+pG36Tr64nYdPl5KLGl0SciXDejHO/VtqDtgrYgJwQsoz3MrJxZDpdHYN3+Rmyra8X7R8/iriXj8MXLKvG/dy/C+JJsXD21NOz3p5Xnoa6lC+8daUFxTjrKY0hBrOv6l1RaL0wCmkf980/OtNSE0xwK5lQUGHH+kdBDA608TkB7GM929RmLei6PF40dPRgRp1TKDkWBx1IaEVCVwEdx3pgC7DzRhhNnL+DDE20xe/mAFsEDWC/m+j398N7ivDGF6PP9G1wSNBg7jBDHYE8/NGXytdO10OB3DjeHLOKaWTS+CNXHz4UNMz3aEl5K+cSckZgxMg+/XX/Y8PYPNnQgw6nElMbXqcs7YYz+WzWNyM1w4I4rxgLwR6XV+SJ3Iq1ZxQMiQoYztvQOiWa8HsFjEfrd1etGdloYeSdPM/rmCJ5T5y6gKDst5lQWg8GWRh/QFhrdXoE39zcaUQfBYZbBzKkoMCSe3244hOKcdNyyUNMPZ4zMx4bvLsO/3xDqLenoO3M3HGjCnIr8mFbgPzarHD9cORVXTgk/mERjwdhh2HfmfNSt9AcbOlCY5URJmIWi4XkZEALG+kBjew+EAMrj5H2E283q9noDvGRA0/W7+jx40Be98bGZscVWA/5drzUWi7n6LC5cuJ9+bQDI8kk9ZsJFu7iCZiuAtokNANouuCz1fJ3LJxShz+3FljAL8nqETqWFEdcjS46dvYD1voXWmoZ2TBqeG7BgGw7VWMgNlXe8XoG3apqxdFIJ5lRoax27fLuBNaOf/NzwycSI4LEI2+zsDa/pF2Xrnr5/Vn3qXPeANrkNBNsa/Rkj8zCqUDPgtU2dUAiojBJpoM0QyrDpYBPerdW8/OCRN5Ih142+2ytiknYAbffqV5aMG/AGF0CbJXgFAmLbH91Ui7XvHws4r6ahA5PLcsO2oTRIa9R36cYyY4kFh0qWxsXtESEGSje8/9hxGrMrCjC6H/HgJbnpKMxyWur67T0u5KY7IhrEUYWZKM1Nx7zRhSHeezg5xO3xwhk0WxlVmIUZI7VnwmqWpqPvaH504xHLHZ51LV0YkZ8R1uP9yPQyjCzIxB/+WQdAG9wnx1jgwz+IhV53z+nzaOnsxdVThyMrTVt/0nMTnTh7AWNLUtvoj8jPRKZTtdT1u3rdyA4j76Q5
FAzLTguRd0ay0R8cRISPzizHu7Ut2H78HMYUZRvbtSOxclY5vAIoyk7DLZf2r7pPWV4GhmWnAQi/KWsomDemEAr5UznsPX0e971xEA9v8E/5vV6BQ40dET1Ofdqpp2I4E2edMZynH6zpA8CYIm13LABcPyt2aQfQ+n5ymXU6hvZud9hFXPP3H/vifPzk41YauPbKBBtJq4EL0Awy4HcIrMhwqvjm8gnYdqwVmy2yL9a1dEWMknGoCm6/vBLb6lqx8WATWjr7wkp4wRg7ci2M/oaaJigELJ2k5cCaU5GPXSfbcKatG30eL8amuKevKOEjeLr6wi/kAoEbtIQQOH2uOyF6PmBjow9oaXNdHoH3jpyNKu3ozBlVgGWTS/B/rpuCrDCaXDiIyJADZsUpNCwWctIdmD4iH9vqWiGEwE9f3u+Tafqw94wWZne6rRsX+jwRjcHwPM2j1z0Qw9OP08MYLm+Nx0LT13fHAtqehf4ypSxPS3IWNMi097iQmxG9X+eNLrR8ZsIt5Lq8wtDHzXz5inF44ktVUaNcPnfJaIwqzMR9bxwMuedjQeGaVnz2kgrkpDvw4+e1lNaRBnczRu4dixnYWzWNmD+mEIW6IzOqAO09bmw8qEWxJSK88GJnYmlOiKfv8njR5/YiJ4L9MG/QaunsQ6/bm5DIHcDmRn+2LwwTiL6Iq6MohD/dviDipqNIrJxZjmunDzdelERxSaWWFvjl3fXYWteKNddMAhGwsUbzHHWvNzhkz0xRdhrItCu3vq0buRmOfu9gDIeqkGWUiMtC0weAu5eNw79dP21A0UOTy7RQWD0UTqe92xXV049EOE3f47WufZuZpmL5lOjb/tMcClZfPQn7zrTjdVOo8bmuPrRdcEU1sLkZTnzukgqjvbF7+tYJ1xrO92Dv6faAe9dDlf+xQ4sUYqOvLa7Xn+8J2Gil5/uJ5OmX5Kaj2TejTmTkDmBzo69r9AAwPkH6480LR+N3X6xKyLXMLBhbiF63F99/bjcmDc/B15aNx5yKArzl88oORogV13Goim9Xrk/e8W3MiheOMMnKPGGkkfljhuH2y8cO6Fp6MrUjQZEV7T3uiOGa0XCGid5xeUTA5qyB8Mm5IzGhNAcPvHnQiJuv60c8/Jcur4SqEIqy0wKynkbCESbhmr4Ibs47NLE0F1lpKnacaEN2mhrzNeyMvlNdDxAAIidb0ynNzUBzZy+EEDh1TgvJZU0/TnyqahSG56VHDIm0A3o8eVefBz/86DQ4VAXLJ5di96k2tHT24mBjJ0YVZkb12ktN084zbeE3Zg2E8NE7oZr+YNH3UgRnQdQ8/YHPXFRD3gk0km6Pd1CL8frf/t6KSTjS3GWU9Kzrx87XUYVZuH1RZcxZJAH/zMVqEAOADNM6mKqQsaO1sjh7SPPDyII+azTnlerq1UJvo3n6Lo9A2wWXsRuXjX6cmFKWh60/uHpIt4tfDBTnpGPmyHxcPXW4sfB25ZRSCAG8fagZBxvaY4roGJ6XYSzk1p+P38YswLebNaymH18Doi+otwZtnGnvdg3S07eOdonXwHXt9DIsmVSCX7x6ALVNHTh2tguqQqiIUe/90cemRQwrDsYw+iEL09qgFtwmPe+U3d+nWNGNvpWnnxUmegcI3KB16lw38jIcg3ou+4PtjX4q8cxdl+HRW+YZn6eV56EkNx1v7GvA0eaumHRe3dPvcXnQ2tUXt41ZgGZgLDV9j9dIBxAvstJUZDiVgBq0Hq9AR687bLK1WFAV62iXeMg7gCZJ3v+pWchKc+CbT+1ETUMHRhVm9qscYX/wRyOF7jsAEDJ70fNORcoem0rkWxh9vd5wZHnHv0FLC9dMzCIuwEbfVmSmqQHGQVEIV04uwbr9jXB7RcxGv6Wz11gQjFfkDhC+AInHGx+DaYaIUJSt7TDW6dRTMAxmIVe1jnZxx3HgKs3LwH2fmoUD9e1Yt79xSDdBhU0r4RsEgiOSqioLkeFUwlaESzV0o9/ebZZ3
fAu5UaJ3AG2D1ukEbswC2Ojbnisnl0J/n2MJ4yv17crd68v6OCKOmr4jgqavxlnTBzSJxyzv+DNsDlzT1zdgBbfDaq/BYLhq6nB88VJtN/hQRsmEK6Kiz2SCI5KG52Vg+4+uwfJB7CC3E7okEyjvaJp+RE/fFB596tyFhEXuAMDFka6OGTIun1hseHOxGA992qlnVIxr9E4ETT/enj4AFOWk4Wyn3+ifj5JhMxbC1ZR1eQe/kBvMDz86FR09rpBavvFEH8SC5R0995BVKO3FkuXyYiCipx9B089JdyArTcXhRq2saiI9fe49m5OX4cRl44vQdsEVky6sb9DSjX7ZEGv6QghtETTOmj6gefrmvCjRcunHgjOsvBP/gSvDqeKhz82N698MRlEIROHlHau9B4yfNIeCTKdquZAbbXAszU03yqWy0WfiyoOfnROxHJ4ZPRXD/jPtKMpOi2uGQ03TD5VFAAyNp5+dhrNdWiw0ERlVs+IRsmm5kCupgXQqimU0EhAavcOEkpfpCDD6kerjminJTTeqs40suIgXconIQUTfJqLQJCGh5z5MRJuJ6AnTsQYi2uT7md/f6zP9R0vzHJsnUZyTDiJteh/PGH3AutSgblyGQtMvyklHj8uLC32axqpPwQcTvRNuc1Zw5SyZ0KqBBUXv+AaBNEkHskSSn+kMMfrZYerjmjHX5bjYF3IdALYC2B3pJCLKBvCsEGIpgEYiWuz71etCiGW+n+0DuD4zhDhVxUh0Fs8YfcA6n/5QevrBsfqGvBOP6B2PhbwjqVdstWnOkHckHcgSSX6m05hFAtoGyVhSl+gRPFlpKgr6UYVusPTb6AsheoQQWwCEL6qpndclhHjb97EVgB4wvYSI3iGiB4i39F2UlPg8kHjG6AN6GoYwUSJDoOnrg5cettne7QIRIibCioaRe8fC04/3Qm6icKpKyAxMb1+8N83ZEUtPvx9Gf2RBaA3qoSTqU0pEtxLRetPPmv5cgIgmApgnhNjmO7RaCLEY2oxhpcX5q4iomoiqm5ujKkjMEDDcp+uPiHMYmVXCNd3YDIWXXOQrFqNv0GrvcSM33TGoak+OMEVHhmIhN1E4lNBi7y6PF06VONVCDORlOEMWcmMx+nqkXCKlHSCGhVwhxFoAawfyx4loBIB7Adxm+nsv+v73FQBzfP81X+9xAI8DQFVVVcTZBDM06A9jPDdmAXqN3ODslEPnUVp5+oORdgD/4GRe+DQikCT19K32T8Rzs5ndyct0hoRsxiLv6LH6icq5ozPUvfoAgLuEEO0AQETDiGi573dVAGqH+PrMANDDNuMt76gKQQgE5It3J1jTH2x+E7+nH9oGq5h2GXCoiuVCrqxrFIkmP9OJjl638Ux09XqQFUOtW71saSIjd4A4Gn0iKiCih0yfFwBYBuAZX6TODQDaAdxMRJsBTAXwQryuz8SPisIsKARUDIvvw2gV+eL39OPvfwTn39GqZg0uStlqIVeXRoYiAikROFSyXKPgyJ3Y0KPBOnyBAp0xevrjSrKxZFKJkSAxUQz4DRBCXB30uQ3AatPnbQCsthLeOdBrMonhE3NHYmp5nuHxxwvVIs+LK0yOl3gQnH/nfLcrap3kaFjVlNXXJYJr5MqCQ6GQqCqZo5ESjTnTZkFWGi70xabpZzhVrL1jwVDfXghyPqXMkJLmUDBzVPzLPfpzt/u95KHU9IHA/DtxkXdUC3nHyFMjp5F0KBbRO56h2SVtR4IzbXb1Rq6Pm2y4V5mEYbWbdSg1fSAw/05cFnKNkE2/kXR55U5Z4FQpZEeuHr3DRMds9PvcXvR5vMiJkHcn2cj5lDJS4kiwpg/4PX23x4uuPk8cFnItBi6P3Au54UJpZR3EEo0/6Zo7pvq4yYZ7lUkYVrnbXWEqNMULPf9Oe8/g8+4AptmKpbwj5+vkUJXQIioS7ztINPozdb7bFXOytWQi51PKSIkaQdMfOnlHy7/TcF4rATlYT5+I4AzKVWPIO5Ia
SaflTmnvkFXrshtmeaerL3oBlWTDvcokDEtpJAELuQBw7KxWYHwwydZ0guUQY+CSVANXFcUigRx7+rGS6VThVEkz+kZRdNb0GSaipj9UkSL6rty6Fs3oD3YhFwhNRWxIVJJGuziV0CybfW7W9GOFiIz8O7qmH0ucfrLgXmUSRlI0fd+ux6PNutEf/MsYXOvXWMiV1NPXUiuHevqyticZ5GU40d7j4oVchjGTFE0/SN4ZrKYP6HHtoZuzZPWMreL0OfdO/9Dz73Syp88wfpKq6cdT3gleyJU8ZNMq5bXLw55+fwiWd9jTZxgkR9M38u909UFVCNkxJMKKhhqUitjIvSOr0bcoWC9zfYBkYBh9X5W2WBKuJQvuVSZhJEPT1/PvAEBehiMu+eG1oiN2kncsylhKXPM3Geh1crt63XDEUB83mVy8d8bYjmRo+oBf4omHtAOEGkk7LuT2ebzSylXJIN+k6Wenx8e5GCrY6DMJIxmaPqDl3wHis4gLWMg7XslDNi125HKWzf6Rn+mEVwBN7b0X9SIuwEafSSBWqZWHWtMHzJ5+fF7GYHnHJbmnz7l3Bo++6e/M+e6LemMWwEafSSBWRVTcQ6zpA/6wzXh6+mbPWPf0pV3ItSii4vIIlnf6gf5snWnrvqgjdwA2+kwC8Xv6ZoM59Jq+vkErXkbfqVLQYrTu6cv5OjmV0HKJbg9H7/QH3dNv6ey7qPPuAGz0mQRiVXVqqIuoAPGXd4JDHOXPvUPwBtUudklc6D0ZmIMEWN5hGB9Wmr47AZq+Lu/EI9kaoMsh5ugd2RdyQ9NFcxGV/pEfYPTZ02cYAEnU9HV5J44hm9byjpxG0r9pTusLj1dACHkHsWRgfrY4eodhfETS9NUhjGsePSwLWWkqJpTkxOXvaUVH7LU5C/D3xVBvmLMjuekO6I+w7Tx9InIQ0beJqDmGcxuIaJPvZ77v2Boi2kJEawZyw4y8hNP0FQKUIdb0d//bCiyaUByXv6d5+qG5d2TNPx+8f0I3/mmSDmLJQFHICBSwo6fvALAVwO4Yzn1dCLHM97OdiCoAjBFCXApgORFNGMD1GUkJp+knQkaIpxfuUBXL3DvSGn1d3vF5+ImQ3OyIruvHI7/TUNLvN0EI0SOE2AJARD0ZWEJE7xDRA6TtS14B4Gmf118KYHHwF4hoFRFVE1F1c3PUyQQjEeESrslmXBxK4EKuR/Y4/RB5R+6av8lCjw7Lkt3TJ6JbiWi96ac/ssxqIcRiaLODldAMfR+AewCsAjA8+AtCiMeFEFVCiKqSkpJ+XIq52HFYaPouj1c6Y+lQCB5PYHijU6WLOt9KJPyefqCmz5uz+ofu6V/s8k7UuxNCrAWwdiB/XAjxou9/XwEwB0AzgEcA3AkgG0DLQP4uIydqGE1fNlnEoSoBO1hlLziiRx3psxc3e/oDwpB3LnKjP2S9SkTDiGi572MVgFoA6wCcEkLsBXAjgPeH6vrMxYdVamW3V0CVzGA6lNAiKrJJVGaC11p04y9rCGqy8Hv6NtP0w0FEBUT0kOlQO4CbiWgzgKkAXhBCHAewhYi2AugQQuyL1/WZix81SDsGAI+EFZqCK025vV7pZitm9FmKyxPo6XMahv6hR+9c7J7+gO9OCHF10Oc2AKtNn93QZJzg790P4P6BXpeRFyIKDXf0yqfpO9XQNAwySyHGjtwgTV/mgSwZ5BnROxe30Zf3SWWkJDgXvYyafnAqYtkzUgbPwIyFXIkHsmQwY2Q+KoZloti3A/xi5eIekhjboVWdCtb05TKYTiU0947cnn5QnL7kCeSSxdJJJXjnX5dHPzHJyPukMlIS7CVrmr5cj6GqKBDCvPAp90Ju2DQMki2wM7HBvcokFK3qVGABEtk8fYeRldK/g9UpsYF0BGXZlL3mLxMZeZ9URkqCPX23hJp+8MKn2yOfRGVG9+j98g5r+naGe5VJKA6FQjZnyWYwVSVwB6vbK1/YqRnd03d5gtMwyNsmJjxs9JmE
ogaVGnR7EpNwLZ44g+UdyYuI6//+Ho7eSQm4V5mE4lSU0I1NknmUhhxiSlAmm0RlJnSNQu6soUxk2OgzCUUN2pwlY8hmcLSL7EXEncaOXPb0UwHuVSahqBaavmwepeEZm+LaZZutmFHVwOynHKdvb9joMwnFYaHpy5ZwLThbqOzyjjOoPW729G0N9yqTUBwWmr5skS+6MdQHL49X7tTKwZWzjELvEreJCQ/3KpNQghOuyazpm7NSyiyFhMu9I3ObmPCw0WcSiq00fVP+eZmlEGfwjlzW9G2NvE8qIyV20PT9ce0mT1+ygctM8I5cf7lEufqFiQ3uVSahBGv6Hgl3szqsFnIl9vRDQ1AFFAIUiQcyJjzyPqmMlIRq+jImXAtcyJW9cpaiEBQyFVGRXK5iIsM9yySU4CIqMiZc8+eq0QYvj+QLuYBe7N0vV7HRty/cs0xCCa4v65FS0w8qL2gDz9ihEDymHbmyD2JMeOR+UhnpUBUlJLWyfJp+YO4d2RdygcCKZi4Jk+AxsdPvniUiBxF9m4iao5w3mog2+X5OEtEnfMcbTMfnD/C+GUlxKiR9ERVzlk0hhC8Ng9xG0qkqpn0H8m2YY2JnIDVyHQC2Atgd6SQhxAkAywCAiF4D8IbvV68LIb40gOsyNsAOmr5qknd071jmwuhAYHEb2XMJMZHpt3sihOgRQmwBIKKeDICIxgE4LYTo9h1aQkTvENEDRBTyZBHRKiKqJqLq5uaIkwlGQsyavtcrIASk0/SNQuJeYQxgquRGUvP0/Zq+7GsUTHii9iwR3UpE600/a/p5jc8CeNr0ebUQYjG0GcPK4JOFEI8LIaqEEFUlJSX9vBRzsRPsUQLy7fz0e/pef2lByQauYLTB2BS9I3l7mPBElXeEEGsBrB3ENVYAuM/09170/e8rAOb4/sukCA5FCanFKpu8Y07D4LZJaUE1YCGXo3fszJAO5z5pp0EI4fZ9HkZEy32/rgJQO5TXZy4+zFEi+n+lW8g1pS3QY9ulX8g1DcYuGyxMM+GJW88SUQERPRR0+OMAXjJ9bgdwMxFtBjAVwAvxuj4jB6pJ0/dIWpZPtfD0ZV/Idaj+BXa3xyt9e5jwDCR6BwAghLg66HMbgNVBxx4M+uwGcOdAr8nIj8NS05fLq3QqoQu5srUhmIAZGO/ItTXcs0xCcfg2Z2nx7ZJr+mZ5R7I2BONQFaM/XBIWq2dih40+k1DMGR2NcEfJDKY5y6ZH0gikYBymOgccsmlvuGeZhOIvwi2vwSQiI/TUqDIleYijuc6BHdJKMOGR+0llpCPA09eNvoQGU1UILq/Xv5Ar2cAVjDmUlj19e8M9yyQUo+qUR15NH9CidQLaILmRdKp+eYfTMNgbuZ9URjqMXPQmL1k2TR/QFz6FYShlHLjMBOyU5ugdW8M9yyQU3cDLrOkD+sKn198GyY2+uYiKi7Ns2ho2+kxCsYumry98Ggu5knvGTlP2U5fHK2WfMLHBPcskFNWk6cvsJTsULSulXRZyzcVt3DYo/8iEh40+k1CcAZq+5iXLqelrWSn9i9Fyv0raQq5/cxZr+vaFe5ZJKGZNX9bUyoA/bYHLJp6+uc4Bx+nbGzb6TEIxFxX3GFk25XsM9bh23dOXcbZiRm+PXv6RPX37wj3LJBRD0w9YyJXPYOoLuX5NX+5XSZ+5GOUfJZ+5MOGR+0llpMNfgMSv6Usp7/jKC8osUZlxqArcHvtEIzHh4Z5lEop1yKZ8BlPzjE0Dl4QSlRm9PXbZbMaER+4nlZEO1TaaPvk8Y3vIIQ6V4BVAn9tX85c9fdvCPcskFIdNNH2nLw2DfXLvaPff4/IAkF+uYsIj95PKSIddNH1VIa2Iik3kEH0Gpht99vTtC/csk1DMIZuyFkYHNDnH7ZV7V7EZ/f67DaMvd3uY8LDRZxKKalrI9Uice0f1afoy7yo2o3v23X0+eUfCPmFiY0A9S0QPE9FmInoiynlriGgLEa2JdIxJHSw1fQm9
Sr2mrMsr4FQJRPK1wYzKnn7K0G+jT0TZAJ4VQiwF0EhEi8OcVwFgjBDiUgDLiWiC1bHB3DwjH5aavoReslPfzGSTjJS6ke9x2SMElQmPo79fEEJ0AXjb97EVQG+YU1cAeJqI5gMoBbAY2iATfKzW/CUiWgVgFQCMHj26v7fHXOTYRdNXFcUI2ZRxphKMbuQ5esf+RB3OiehWIlpv+lnjOz4RwDwhxLYwXy0F0AfgHmhGfHiYYwEIIR4XQlQJIapKSkoG1Cjm4sWyiIqEXqXTlGVTxplKMLqR1+WdNI7esS1RPX0hxFoAa83HiGgEgHsB3Bbhq80AHgFwJ4BsAC2+41bHmBRBN/D6jlwiOT19h0rGBjPZY/QBf78YC7k2aBNjzUB79gEAdwkh2iOcsw7AKSHEXgA3Ang/zDEmhfB7+l6fHi6fwQf0IipanL5T0jaYCfb0Wd6xLwNZyF0AYBmAZ4hoExHd4DteQEQP6ecJIY4D2EJEWwF0CCH2WR2LRyMYeTCKqPi8ZBm9fEBbm/DoC7k28IodwZuzJJTcmNgYyELuNgDlFsfbAKwOOnY/gPujHWNSh+AiKjLq+QCgqgSXV8DltclCLqdhSBnkfOMYaTFr+h6JDabTV1PW7fHawit2cpx+yiD/08pIRYCmL3Hki15ExeWRV6IyY2zO6uMsm3aHe5ZJKLqRd3m0qlOyGkyzBm4HrzhU3mHTYFe4Z5mEoigEheTX9M1G0g4G0hkUvWOHiCTGGvmfVkY6HIoivabv9/TllajM+OUd9vTtDvcsk3BUhXyavl3kHflfI6OIipujd+yO/E8rIx0OlXyavrxesu4Jd7s8tjCQjiBPn9Mw2BfuWSbhGBubvELK+rhAoKcv68BlJiThmg3axFgj5xvHSI1q0vRljXzxL+TaI7VycBoGWWU3JjryP62MdDhsoOkb+efdNpF3VL+8Y4eiMEx42OgzCUfPUCmzpq8PVkLYYyOTX96xx8yFCQ/3LpNwHHrVKYk9fbNhlHXgMqN7+n0ery1mLkx42OgzCUf1LeRqmr6cj6DZ0Nshpt2cP4gjd+wN9y6TcLTNWXJr+mZv2A6evhowiMnfHiY8bPSZhKMq8mv65hmKHYykM2AQY7NgZ7h3mYSj1ZeVu4iK+b5llajMEJHRJlnDaJnYkP9pZaRDNW3OktWrdNpM3gH8A5kd1iiY8HDvMglH1/TlTrhmlnfs8RrpmTXtMogx1tjjaWWkQtf0XR6vPeQdSdsQjD54pTnYLNgZ7l0m4ThMmr6sXqVZx1clna0E42BPPyUYkNEnooeJaDMRPdHf84iogYg2+X7mD+T6jNzYIeFaoKcvZxuC0aU2u8hVjDX97l0iygbwrBBiKYBGIlrcz/NeF0Is8/1sH/CdM9Jih4RrAQu5krYhGH2dQtY+YWKj30ZfCNElhHjb97EVQG8/z1tCRO8Q0QPEWZ1SEj3hmsyavkO130Ku4enbZObCWBO1d4noViJab/pZ4zs+EcA8IcS2KN8PPm+1EGIxAAeAlRbnryKiaiKqbm5u7neDmIsfPeGazJq+w44LuRynnxI4op0ghFgLYK35GBGNAHAvgNsifdfqPCHEi77/fQXAHN9/zdd7HMDjAFBVVSWi3R8jH4EJ1+T0Ks1GX9bZSjD64rQdNpsx4Rlo7z4A4C4hRHt/ziOiYUS03Pe7KgC1A7w+IzGqopgSrslpMM2Sjl2MJG/OSg0GspC7AMAyAM/4InBu8B0vIKKHopzXDuBmItoMYCqAFwbdAkY6HArB5fFKnYYhMMumnG0IRjf2dpGrGGuiyjvB+LT5covjbQBWRzsPwJ39vSZjLxwqodft1f5fUgPjsGGCMmNHrk0GMcYaezytjFQ4FDIKcMur6ZvlHXsYSZZ3UgPuXSbhqIpiePqyGkxVIegBx3Yxkk6Wd1ICezytjFSY5QNZNX3AvxNXVokqGL1f7LIwzVjDvcsknIAqTRIbTNVmuWocLO+kBNy7TMJxBsS4
y/sI2i1XDadhSA3s8bQyUqEG5KKX18D4NzPJ2wYznIYhNeDeZRKOXYqK++Ude7xGDg7ZTAns8bQyUqHaJIWB02a5ahw2m7kw1rDRZxJOwG5Wib1kvXiKzAOXGSdH76QE3LtMwrFLCgOnsfBpj9eIN2elBty7TMJRzbnoJfaS/dE78rbBjBG9I3GfMNFho88kHLukJVaNzVn2eI2cNgtBZazh3mUSjl00fb8GLu/AZUblOP2UQN43jpGWgJBNiQ2M3XawOjlOPyXg3mUSTsDmLInlHYfdcu+wp58SsNFnEo5dNH3/DlZ522CGE66lBty7TMKxi6avD1gyD1xmeEduaiDvG8dIi100faeqwKkSiORtgxl9bULmgZiJDvcuk3Dso+mTrQykw2ZpJRhr7PPEMtJgJ01f5plKMKzppwYD6l0iepiINhPRE1HOayCiTb6f+b5ja4hoCxGtGci1GflRbaLpOxRF6plKMEYlMBsNZEwo/X7jiCgbwLNCiKUAGolocYTTXxdCLPP9bCeiCgBjhBCXAlhORBMGeN+MxJjlA1ViA6N5+vIOWsGoCnv6qUC/e1cI0SWEeNv3sRVAb4TTlxDRO0T0AGmrXSsAPO3z+ksBRBowGJti1vRlzvMypSwX00fkJfs24sa4kmwU56ShNDc92bfCDCFRjT4R3UpE600/a3zHJwKYJ4TYFuHrq4UQiwE4AKyEZuj7ANwDYBWA4RbXW0VE1URU3dzcPIAmMRc7dtH0Vy0Zjz/dviDZtxE35o4uRPWPrkFBVlqyb4UZQhzRThBCrAWw1nyMiEYAuBfAbVG++6Lvf18BMAdAM4BHANwJIBtAi8V3HgfwOABUVVWJaPfHyIddNH2GkZGBvnEPALhLCNEe7gQiGkZEy30fqwDUAlgH4JQQYi+AGwG8P8DrMxJjF02fYWRkIAu5CwAsA/CMLyrnBt/xAiJ6yHRqO4CbiWgzgKkAXhBCHAewhYi2AugQQuwbbAMY+bBLnD7DyEhUeScYn4ZfbnG8DcBq02c3NBkn+Lz7Adzf3+sy9iEwDQMbfYZJJCyoMgnHLoXRGUZG2OgzCcdhKihul7w1DCMLbPSZhGO3PPQMIxNs9JmEo0s6bPQZJvGw0WcSjsNmeegZRibY6DMJx6g4xTleGCbh8FvHJBzW9BkmebDRZxKObuvZ6DNM4mGjzyQcIoJDIU7BwDBJgI0+kxRUm5UaZBhZ4LeOSQpO1V5VpxhGFtjoM0lBVYhDNhkmCbDRZ5KCQ7FXUXGGkQU2+kxS0Dx9fvwYJtHwW8ckBaeqSF0fl2FkhY0+kxRY02eY5MBGn0kKrOkzTHJgo88kBdb0GSY58FvHJAUHx+kzTFLod41chokHX1s2HnmZzmTfBsOkHAMy+kT0MIDZAI4IIe4Ic85oAGt9H8cD+KYQ4nkiagBQ4zv+XSHE9oHcAyM3188ekexbYJiUpN/yDhFlA3hWCLEUQCMRLbY6TwhxQgixTAixDMBeAG/4fvW6fpwNPsMwTGLpt9EXQnQJId72fWwF0BvpfCIaB+C0EKLbd2gJEb1DRA8QV8VmGIZJKFGNPhHdSkTrTT9rfMcnApgnhNgW5U98FsDTps+rhRCLoUlLKy2ut4qIqomourm5OfaWMAzDMFEhIUT/v0Q0AsAjAG4TQrRHOXcjgGuEEO6g4ysAzBFC3Bvuu1VVVaK6urrf98cwDJPKENF2IUSV1e8GGrL5AIC7YjD44wA06AafiIYR0XLfr6sA1A7w+gzDMMwAGMhC7gIAywA8Q0SbiOgG3/ECInoo6PSPA3jJ9LkdwM1EtBnAVAAvDOSmGYZhmIExIHknUbC8wzAM03+GQt5hGIZhJOSi9vSJqBnA8QF+vRhASxxvRxZSsd2p2GYgNdudim0G+t/uMUKIEqtfXNRGfzAQUXW46Y2dScV2p2KbgdRsdyq2GYhvu1neYRiGSSHY6DMMw6QQdjb6jyf7BpJEKrY7FdsMpGa7U7HNQBzbbVtN
n2EYhgnFzp4+wzAMEwQbfYZhmBTClkafiNYQ0RY9I6jdIaKHiWgzET3h+5wS7Sei2UT0mu//U6XNX/D19YtElJkK7SaiHxDR20T0HBE5iOizRLSViO5L9r3FE1/bvu3bn6QfC+nfwfa57Yw+EVVA25hwKYDlRDQh2fc0lIQpamP79hORAuDLAJyp0udElAHgegBXArgR2oYdW7ebiNIAXCqEWALgEIAFAK4XQiwEkGNK4GgHHAC2AtgNWNuyeDzrtjP6AFYAeJqI5gMoBWBZ2csuWBS1WYTUaP9XAPzR9/+p0ucLAXRDq0J3D1Kg3UKIPgBpRDQHwCQA5QBe8ZVjnQJgaRJvL64IIXqEEFsA6NE1Vv076D63o9EvBdAH7aVYBWB4cm8nMehFbaD1qa3bT0RlAEYKIXb4DqVKn5cDyAZwLYCxSJ12vwPgXgBNANIAnAfwGwC3wb5tBqz7d9B9bkej3wytwMsPAaQjBfJ0+Ira3AvgLqRG+28CcA0RbQIwH0An7N9mAOgCsFkI4QWwGYAXNm83EV0CIE8IsQLAUQAeAD8H8DtopVpt12YTVu/yoN9vOxr9dQBOCSH2QtM930/y/SQCc1Eb27dfCPGIEOIyIcQyANsBvAibt9nHdmjTewCYDmAL7N/uMgB61b2zAEoAnAOwHlqb30vSfSUCq3d50O+37Yy+EOI4gC1EtBVAhxBiX7LvaSgJLmoDYDZSqP1A6vS5EOIMgA+I6D0ATiHEZti/3a8BqPQVXroBwJ8A/B7ABwCm+X5vS6ye63g867wjl2EYJoWwnafPMAzDhIeNPsMwTArBRp9hGCaFYKPPMAyTQrDRZxiGSSHY6DMMw6QQbPQZhmFSiP8PgrkiCTjeWLEAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ "<Figure size 432x288 with 1 Axes>"
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "# plot objective 2\n",
+ "plt.figure()\n",
+ "plt.imshow(simu.t[:,1].reshape((101,101)), vmin=-1.0, vmax=0.0, origin=\"lower\", extent=[-2.0, 2.0, -2.0, 2.0])\n",
+ "plt.title(\"objective 2\")\n",
+ "plt.colorbar()\n",
+ "plt.plot([-1.0/np.sqrt(2.0)], [-1.0/np.sqrt(2.0)], '*')\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Performing optimization\n",
+ "\n",
+ "### Setting policy\n",
+ "\n",
+ "Use `physbo.search.discrete_multi.policy` for multi-objective optimization. \n",
+ "Specify the number of objective functions in `num_objectives`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-01-05T06:06:14.163097Z",
+ "start_time": "2021-01-05T06:06:14.159742Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "policy = physbo.search.discrete_multi.policy(test_X=test_X, num_objectives=2)\n",
+ "policy.set_seed(0)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As with the usual usage of `physbo.search.discrete.policy` (with one objective function), optimization is done by calling the `random_search` or `bayes_search` methods. The basic API and usage are roughly the same as `discrete.policy`."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Random search"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-01-05T06:06:14.316770Z",
+ "start_time": "2021-01-05T06:06:14.164245Z"
+ },
+ "scrolled": false
+ },
+ "outputs": [],
+ "source": [
+ "policy = physbo.search.discrete_multi.policy(test_X=test_X, num_objectives=2)\n",
+ "policy.set_seed(0)\n",
+ "\n",
+ "res_random = policy.random_search(max_num_probes=50, simulator=simu)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The evaluation value of the objective function (the array) and the action ID at that time are displayed. \n",
+ "It also displays a message when the Pareto set is updated.\n",
+ "\n",
+ "If you want to display the contents of the Pareto set when it is updated, specify `disp_pareto_set=True`. \n",
+ "Pareto set is sorted in ascending order of the first objective function value. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-01-05T06:06:14.493398Z",
+ "start_time": "2021-01-05T06:06:14.318132Z"
+ },
+ "scrolled": false
+ },
+ "outputs": [],
+ "source": [
+ "policy = physbo.search.discrete_multi.policy(test_X=test_X, num_objectives=2)\n",
+ "policy.set_seed(0)\n",
+ "res_random = policy.random_search(max_num_probes=50, simulator=simu, disp_pareto_set=True)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Checking results\n",
+ "\n",
+ "#### History of evaluation values"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-01-05T06:06:14.498984Z",
+ "start_time": "2021-01-05T06:06:14.494679Z"
+ },
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "res_random.fx[0:res_random.num_runs]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Obtaining the Pareto solution"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-01-05T06:06:14.504080Z",
+ "start_time": "2021-01-05T06:06:14.500385Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(array([[-0.95713719, -0.09067194],\n",
+ " [-0.92633083, -0.29208351],\n",
+ " [-0.63329589, -0.63329589],\n",
+ " [-0.52191048, -0.72845916],\n",
+ " [-0.26132949, -0.87913689],\n",
+ " [-0.17190645, -0.91382463]]),\n",
+ " array([40, 3, 19, 16, 29, 41]))"
+ ]
+ },
+ "execution_count": 14,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "front, front_num = res_random.export_pareto_front()\n",
+ "front, front_num"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Plotting the solution (evaluated value)\n",
+ "\n",
+ "Note again that the space to be plotted is $y = (y_1, y_2)$ and not $x = (x_1, x_2)$.\n",
+ "\n",
+ "The red plot is the Pareto solution."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-01-05T06:06:14.511086Z",
+ "start_time": "2021-01-05T06:06:14.505221Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "def plot_pareto_front(res):\n",
+ " front, front_num = res.export_pareto_front()\n",
+ " dominated = [i for i in range(res.num_runs) if i not in front_num]\n",
+ " points = res.fx[dominated, :]\n",
+ "\n",
+ " plt.figure(figsize=(7, 7))\n",
+ " plt.scatter(res.fx[dominated,0], res.fx[dominated,1], c = \"blue\")\n",
+ " plt.scatter(front[:, 0], front[:, 1], c = \"red\")\n",
+ " plt.title('Pareto front')\n",
+ " plt.xlabel('Objective 1')\n",
+ " plt.ylabel('Objective 2')\n",
+ " plt.xlim([-1.0,0.0])\n",
+ " plt.ylim([-1.0,0.0])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-01-05T06:06:14.661288Z",
+ "start_time": "2021-01-05T06:06:14.512392Z"
+ },
+ "scrolled": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAcoAAAG5CAYAAAAOKnSzAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8QVMy6AAAACXBIWXMAAAsTAAALEwEAmpwYAAAkTElEQVR4nO3de3QkZ3nn8e+jscdG2MSegRhzkcTFXEyGEKyYWwgcYnNINmDD4RJWAfuQMJtlycnZhBBnxUKWRIFwyW0J50SB4AvKcrHBNpdgxnaAkJgETSA4Ngk2xBK+MtgQMIKM8Tz7R5U8PbJU6ml1d3VXfz/n6HRXdXX1o5qRfqq33nrfyEwkSdL6xuouQJKkQWZQSpJUwaCUJKmCQSlJUgWDUpKkCgalJEkVDEppREXECRHxmYj4bkS8ve56pEFlUEpdFhE3RMT3I+LOiLgtIs6NiGN68DnnRsTvbWEXu4FvAvfLzN/oUln3KI/Dad3er9RvBqXUG8/NzGOAJwLTwOsO581R6PXP5yRwbW4w6khEHNHjz5eGgkEp9VBm3gT8NfBjEXF8RHw0IvZFxLfK5w9Z3TYiPhURcxHxd8AK8PCIeExE7ImIOyLi3yLixeW2u4EZ4LXlmetHyvWPLffz7Yi4JiKet15dEXEucFbL+0+LiN+JiAsj4r0R8R3g7Ih4UERcWn7+9RHxypZ9/E5EfCAizi+bb6+JiOnytQuACeAj5f5f2/2jK/WHQSn1UEQ8FPg54AsUP2/voTiTmwC+D7xjzVteRtEkeiywD9gD/BXwo8AvAO+MiJMzcx5YAN6Smcdk5nMj4kjgI8Any+1/FViIiEevrSszz17z/svLl84ALgSOK19/H3Aj8CDghcDvR8SzWnb1vHKb44BLV7+fzHwZsEx5Zp2ZbzmsAycNEINS6o2LI+LbwGeBTwO/n5m3Z+ZFmbmSmd8F5oBnrHnfuZl5TWb+EHgOcENmviczf5iZXwAuAl60wWc+GTgGeHNm7s/MK4GPAi89jLqvysyLM/MAcH/gacBvZeYPMvOLwLuAl7ds/9nM/Hhm3g1cAPz4YXyWNBS8BiH1xpktZ2kARMQ48EcUAXh8ufrYiNhWBg3A11veMgk8qQzcVUdQBNJ6HgR8vQy5VUvAgw+j7tbPfxBwRxnqrfubblm+teX5CnB0RBxRBr3UCAal1D+/ATwaeFJm3hoRT6Boko2WbVo71nwd+HRmnr7B/tZ2wrkZeGhEjLWE5QTwlcOosXWfNwM7IuLYlrCcAG7qYF/S0LLpVeqfYymuS347InYAb9hk+48Cj4qIl0XEkeXXT0bEY8vXbwMe3rL9P1Cc1b223PaZwHMpriEetsz8OvD3wJsi4uiIeDzwS8B729zF2vqkoWRQSv3zx8B9KO5d/BzwiaqNy7O4Z1N04rmZopnzD4Cjyk3eDZxc9nC9ODP3UwTjz5af8U7g5Zn5r1uo+aXAVPn5HwbesLZJucKbgNeV9b1mCzVItQonbpYkaWOeUUqSVKHWoIyI55Q3UV8fEees8/pREfH+8vV/iIipGsqUJI2w2oIyIrYBf0ZxPeVk4KURcfKazX4J+FZmPpKiW/0f9LdKSdKoq/OM8lTg+sz8WtkJ4X0Uo4K0OgM4r3x+IfAzERFIktQndd5H+WAOvbn5RuBJG22TmT+MiP8AdlL06LtHOe7lboD73ve+pzzmMY/pVc2SpCG0d+/eb2bmAzp5byMGHCjHvZwHmJ6ezsXFxZorkiQNkohY6vS9dTa93gQ8tGX5Idx7xI97timn/PkR4Pa+VCdJEvUG5eeBkyLiYRGxneKm6kvXbHMpxVRAUMxccOVGc+dJktQLtTW9ltccXw1cBmwD/jIzr4mINwKLmXkpxcgjF0TE9cAdFGEqSVLf1HqNMjM/Dnx8zbrXtzz/ARtPKSRJUs85Mo8kSRUMSkmSKhiUkiRVMCglSapgUEqSVMGglCSpgkEpSVIFg1KSpAoG
pSRJFQxKSZIqGJSSJFUwKCVJqmBQSpJUwaCUJKmCQSlJUgWDUpKkCgalJEkVDEpJkioYlJIkVTAoJUmqYFBKklTBoJQkqYJBKUlSBYNSkqQKBqUkSRUMSkmSKhiUrRYWYGoKxsaKx4WFuiuSJNXsiLoLGBgLC7B7N6ysFMtLS8UywMxMfXVJkmrlGeWq2dmDIblqZaVYL0kaWQblquXlw1svSRoJBuWqiYnDWy9JGgkG5aq5ORgfP3Td+HixXpI0sgzKVTMzMD8Pk5MQUTzOz9uRR5JGnL1eW83MGIySpEN4RilJUgWDUpKkCgalJEkVDEpJkioYlJIkVTAoJUmqYFBKklTBoJQkqYJBKUlSBYNSkqQKBqUkSRUMSkmSKhiUkiRVMCglSapgUEqSVMGglCSpgkEpSVIFg1KSpAoGpSRJFQxKSZIqGJSSJFUwKCVJqmBQSpJUwaCUJKmCQSlJUoXGBeXVV8PYGExNwcJC3dVIkobdEXUX0G379xePS0uwe3fxfGamvnokScOtcWeUrVZWYHa27iokScOs0UEJsLxcdwWSpGHW+KCcmKi7AknSMGt0UI6Pw9xc3VVIkoZZ44Jy+3aIgMlJmJ+3I48kaWsa1+t11y5YXKy7CklSU9RyRhkROyJiT0RcVz4ev842T4iIqyLimoj4UkS8pI5aJUmjra6m13OAKzLzJOCKcnmtFeDlmfk44DnAH0fEcf0rUZKk+oLyDOC88vl5wJlrN8jMr2TmdeXzm4FvAA/oV4GSJEF9QXlCZt5SPr8VOKFq44g4FdgOfHWD13dHxGJELO7bt6+7lUqSRlrPOvNExOXAA9d56ZCxcjIzIyIr9nMicAFwVmYeWG+bzJwH5gGmp6c33JckSYerZ0GZmadt9FpE3BYRJ2bmLWUQfmOD7e4HfAyYzczP9ahUSZI2VFfT66XAWeXzs4BL1m4QEduBDwPnZ+aFfaxNkqR71BWUbwZOj4jrgNPKZSJiOiLeVW7zYuCngbMj4ovl1xNqqVaSNLIis1mX9Kanp3PREQckSS0iYm9mTnfy3sYNYSdJUjcZlJIkVTAoJUmqYFBKklTBoJQkqYJBKUlSBYNSkqQKBqUkSRUMSkmSKhiUkiRVMCglSapgUEqSVMGglCSpgkEpSVIFg1KSpAoGpSRJFQxKSZIqGJSSJFUwKCVJqmBQSpJUwaCUJKmCQSlJUgWDUpKkCgalJEkVDEpJkioYlJIkVTAot2JhAaamYGyseFxYqLsiSVKXHVF3AUNrYQF274aVlWJ5aalYBpiZqa8uSVJXeUbZqdnZgyG5amWlWC9JagyDslPLy4e3XpI0lAzKTk1MHN56SdJQMig7NTcH4+OHrhsfL9ZLkhrDoOzUzAzMz8PkJEQUj/PzduSRpIax1+tWzMwYjJLUcJ5RSpJUwaCUJKmCQSlJUgWDUpKkCgalJEkVDEpJkioYlJIkVTAoJUmqYFBKklTBoJQkqYJBKUlSBYNSkqQKBqUkSRUMSkmSKhiUkiRVMCglSapgUEqSVMGglCSpgkEpSVIFg1KSpAoGpSRJFQxKSZIqGJSSJFUwKCVJqmBQSpJUwaCUJKmCQSlJUgWDUpKkCgalJEkVDEpJkioYlJIkVTAoJUmqYFBKklShlqCMiB0RsScirisfj6/Y9n4RcWNEvKOfNUqSBPWdUZ4DXJGZJwFXlMsb+V3gM32pSpKkNeoKyjOA88rn5wFnrrdRRJwCnAB8sj9lSZJ0qLqC8oTMvKV8fitFGB4iIsaAtwOv2WxnEbE7IhYjYnHfvn3drVSSNNJ6FpQRcXlE/Ms6X2e0bpeZCeQ6u3gV8PHMvHGzz8rM+cyczszpW299AGNjMDUFCwvd+V4kSaPriF7tODNP2+i1iLgtIk7MzFsi4kTgG+ts9hTg6RHxKuAYYHtE3JmZVdcz2b+/eFxagt27i+czMx19C5Ik1db0eilwVvn8LOCStRtk5kxmTmTmFEXz6/mbheRaKyswO7vV
UiVJo6yuoHwzcHpEXAecVi4TEdMR8a5uftDycjf3JkkaNVFcImyOiOmExXuWJyfhhhvqq0eSVL+I2JuZ0528t9Ej84yPw9xc3VVIkoZZ44Jy+3aIKM4k5+ftyCNJ2pqe9Xqty65dsLi4+XaSJLWjcWeUkiR1k0EpSVIFg1KSpAoGpSRJFTYMynIeyDdFxAUR8V/XvPbO3pcmSVL9qs4o3wMEcBHwCxFxUUQcVb725J5XJknSAKgKykdk5jmZeXFmPg/4J+DKiNjZp9okSapd1X2UR0XEWGYeAMjMuYi4CfgMxWwekiQ1XtUZ5UeAZ7WuyMxzgd8A9vewJkmSBsaGZ5SZ+doN1n8COKlnFUmSNEC8PUSSpAoGpSRJFQxKSZIqbBqUETEeEf87Iv6iXD4pIn6+96VJklS/ds4o3wP8J/CUcvkm4Pd6VlGXLCzA1BSMjRWPCwt1VyRJGkbtBOUjMvMtwF0AmblCMWLPwFpYgN27YWkJMovH3bsNS0nS4WsnKPdHxH2ABIiIR1CcYQ6s2VlYWTl03cpKsV6SpMNRNTLPqt8BPgE8NCIWgKcBZ/ewpi1bXj689ZIkbWTToMzMT0bEXoqB0AP4tcz8Zs8r24KJiaK5db31kiQdjnZ6vX4EeDbwqcz86KCHJMDcHIyPH7pufLxYL0nS4WjnGuXbgKcD10bEhRHxwog4usd1bcnMDMzPw+QkRBSP8/PFekmSDkdkZnsbRmyjGCT9lcBzMvN+vSysU9PT07m4uFh3GZKkARIRezNzupP3ttOZh7LX63OBlwBPBM7r5MMkSRo2mwZlRHwAOJWi5+s7gE+vzlEpSVLTtXNG+W7gpZl5d6+LkSRp0GwYlBHxrMy8ErgvcEbEoYPxZOaHelybJEm1qzqjfAZwJcW1ybUSMCglSY23YVBm5hvKp2/MzH9vfS0iHtbTqiRJGhDt3Ed50TrrLux2IZIkDaKqa5SPAR4H/EhEvKDlpfsBAz3ggCRJ3VJ1jfLRwM8Dx3HodcrvUgw6IElS41Vdo7wEuCQinpKZV/WxJkmSBkY71yh/JSKOW12IiOMj4i97V5IkSYOjnaB8fGZ+e3UhM78F/ETPKpIkaYC0E5RjEXH86kJE7KDNMWIlSRp27QTe24GrIuKD5fKLAGd2lCSNhE3PKDPzfOAFwG3l1wsy84JeF9apq6+GsTGYmoKFhbqrkSQNu3aaXgF2AN/LzHcA+wZ5ZJ79+yETlpZg927DUpK0NZsGZUS8Afgt4LfLVUcC7+1lUd2ysgKzs3VXIUkaZu2cUT4feB7wPYDMvBk4tpdFddPyct0VSJKGWTtBuT8zk2LGECLivr0tqbsmJuquQJI0zNoJyg9ExJ8Dx0XEK4HLgb/obVndMT4Oc/bPlSRtwaa3h2Tm2yLidOA7FOO/vj4z9/S8sg5t3w533VWcSc7NwcxM3RVJkoZZWwMHlME4sOHYatcuWFysuwpJUlNs2PQaEZ8tH78bEd9Z5+vfI+JV/StVkqT+q5o95KfKx3V7uEbETuDvgXf2pjRJkurXVtNrRDwR+CmKnq+fzcwvZObtEfHMHtYmSVLt2hlw4PXAecBO4P7AuRHxOoDMvKW35UmSVK92zihngB/PzB8ARMSbgS8Cv9fDuiRJGgjt3Ed5M3B0y/JRwE29KUeSpMGy4RllRPxfimuS/wFcExF7yuXTgX/sT3mSJNWrqul19W7EvcCHW9Z/qmfVSJI0YKpuDzkPICKOBh5Zrr5+9VqlJEmjoGrAgSMi4i3AjRS9Xs8Hvh4Rb4mII/tVoCRJdarqzPNWigmbH5aZp2TmE4FHAMcBb+tDbZIk1a4qKH8eeGVmfnd1RWZ+B/jvwM/1ujBJkgZBVVBmOQ/l2pV3U85NKUlS01UF5bUR8fK1KyPiF4F/7V1JkiQNjqrbQ/4H8KGIeAXFLSIA08B9gOf3ujBJkgZB1e0hNwFPiohnAY8rV388M6/oS2WSJA2ATcd6zcwrgSv7UIskSQOn
nbFeJUkaWQalJEkVDEpJkioYlJIkVaglKCNiR0TsiYjrysfjN9huIiI+GRFfjohrI2Kqz6VKkkZcXWeU5wBXZOZJwBXl8nrOB96amY8FTgW+0af6JEkC6gvKMyhmJKF8PHPtBhFxMnBEZu4ByMw7M3OlbxVKkkR9QXlCZt5SPr8VOGGdbR4FfDsiPhQRX4iIt0bEtvV2FhG7I2IxIhb37dvXq5olSSNo0wEHOhURlwMPXOel2daFzMyIWG+Q9SOApwM/ASwD7wfOBt69dsPMnAfmAaanpx2wXZLUNT0Lysw8baPXIuK2iDgxM2+JiBNZ/9rjjcAXM/Nr5XsuBp7MOkEpSVKv1NX0eilwVvn8LOCSdbb5PHBcRDygXH4WcG0fapMk6R51BeWbgdMj4jrgtHKZiJiOiHfBPfNevga4IiKuBgL4i5rqlSSNqFhnbuahdtRR03nXXYtMTMDcHMzM1F2RJKluEbE3M6c7eW/PrlHWZf/+4nFpCXbvLp4blpKkTjV6CLuVFZid3Xw7SZI20uigBFhevve6hQWYmoKxseJxYaHfVUmShkXjg3Ji4tDlhYWiSXZpCTIPNtEaliPOv54kbaBxnXkiphMWARgfh/n5Q69RTk0V4bjW5CTccENfStSgWf3raaVlhMT1/vNIGlpb6czTuKDcrNfr2FhxJrlWBBw40J8aNWD860lqPHu9tti1CxYXN359YmL934lrm2g1Qta7kF21XtJIafw1yrXm5opWtVbj48V6jaiN/kryrydJjGBQzswUl54mJ4vm1slJL0WNPP96klShcU2v7ZiZMRjVYvU/w+xs0dzqsE6SWoxkUEr34l9PkjYwck2vkiQdjpEJSu8nlyR1YiSaXtfeT+6A6ZKkdo3EGeXs7KGDroADpkuS2jMSQen95JKkTo1EUHo/uSSpUyMRlN5PLknq1EgEpaPxSJI6NRK9XsH7ySVJnRmJM0pJkjplUEqSVMGglCSpgkEpSVIFg1KSpAoGpSRJFQxKSZIqGJSSJFUwKCVJqmBQSpJUwaCUJKmCQSlJUgWDUpKkCgalJEkVDMotWFiAqSkYGyseFxbqrkiS1G0jMx9lty0swO7dsLJSLC8tFcvgvJeS1CSeUXZodvZgSK5aWSnWS5Kaw6Ds0PLy4a2XJA0ng7JDExOHt16SNJwMyg7NzcH4+KHrxseL9VJP2YtM6iuDskMzMzA/D5OTEFE8zs/bkUc9ttqLbGkJMg/2IjMspZ6JzKy7hq6anp7OxcXFusuQemNqqgjHtSYn4YYb+l2NNDQiYm9mTnfy3sadUV59tS1SajB7kUl917ig3L/fFik1mL3IpL5rXFC28r5GNY69yKS+a3RQgi1Sahh7kUl917jOPBHTCQc789jHQZJkZ54N2CIlSdqqxgXl9u22SEmSuqdxs4fs2gXeRilJ6pbGnVHecYeje0mSuqdxZ5RLS3DgwMHnzhEpSdqKxp1RrobkKu+llCRtReOCcj3eSylJ6tRIBKWje0mSOtW4oBxb8x15L6UkaSsaF5STk47uNQicW1hSUzSu1+uOHd5HWbfVuYVXVoplex9LGmaNO6NU/WZnD4bkKnsfSxpWjQvKvXtt6qubcwtLapLGBSU4aXPdnFtYUpM0MijBpr46ObewpCZpbFCCTX11cW5hSU3S6ImbnbRZkgRO3Lwum/rUT943KjVX4+6jhOJMcm7Opj71h/eNSs3WuKbX6enpXHTEAfXR1FQRjmvZ9C8NjqFreo2IHRGxJyKuKx+P32C7t0TENRHx5Yj404iIXtdmE5oOl/eNSs1W1zXKc4ArMvMk4Ipy+RAR8VTgacDjgR8DfhJ4Ri+LWm1CW1qCTO/HVHu8b1RqtrqC8gzgvPL5ecCZ62yTwNHAduAo4Ejgtl4W5dBr6oT3jUrNVldQnpCZt5TPbwVOWLtBZl4F/A1wS/l1WWZ+eb2dRcTuiFiMiMV9+/Z1XJRNaOqE941KzdazXq8RcTnwwHVeOuT8
LDMzIu7VoygiHgk8FnhIuWpPRDw9M/927baZOQ/MQ9GZp9OaJybW75RhE5o2MzNjMEpN1bOgzMzTNnotIm6LiBMz85aIOBH4xjqbPR/4XGbeWb7nr4GnAPcKym6Zmzu0mz/YhCZJo66uptdLgbPK52cBl6yzzTLwjIg4IiKOpOjIs27Ta7fYhCZJWquW+ygjYifwAWACWAJenJl3RMQ08CuZ+csRsQ14J/DTFB17PpGZv77Zvr2PUpK01lbuo6xlZJ7MvB34mXXWLwK/XD6/G/hvfS5NkqRDNHasV0mSusGglCSpgkEpSVIFg1KSpAoGpSRJFQxKSZIqGJSSJFUwKCVJqtC4oLz6aiddliR1T+OCcv/+e0+6vLBQBKcBKkk6XLWM9dpLEdMJB8d63bkTvv/9e88I4mDnkjQ6tjLWa+POKNe6/fZDQxKK5dnZ9beXJKlV44NyI8vLdVcgSRoGIxuUExN1VyBJGgYjGZTj4zA3V3cVkqRhMBJBuXMnTE5CRPFoRx5JUrtqmbi5l8bG4MCBg8vj4/Anf2IwSpI607gzyoiDz3fu9OxRkrQ1jQvKu+8++Pz736+vDklSMzQuKFt5v6QkaasaHZTg/ZKSpK1pfFB6v6SkvnBQ6cZqXK/XVt4vKakvFhaKWRhWx8tcnZUB7E3YAI0bFP2oo6bzrrsWmZgoQtL/o5J6bmqqCMe1Jifhhhv6XY3WsZVB0Rt3RrlrFywubr6dJHXNRp0h7CTRCI2/RilJPbdRZwg7STSCQSlJWzU3V3SKaGUnicYwKCVpq2ZmimHAHFS6kRp3jVKSajEzYzA2lGeUkiRVMCglSapgUEqSVMGglCSpgkEpSVIFg1KSmsYB2rvK20MkqUkcoL3rPKOUpCaZnT0YkqucxX5LDEpJahIHaO86g7JNNvlLGgoO0N51BmUbVpv8l5Yg82CTv2EpaeA4QHvXGZRtsMlf0tBwgPaui8ysu4aump6ezsUuz9w8NlacSa4VAQcOdPWjJEk9EBF7M3O6k/c27ozy6qu7fx3RJn9JdbOfRH0aF5T793f/OqJN/pLqZD+JejWu6TViOuFg0+vkJNxww9b3u7BQXJNcXi7OJOfmbPKX1B9TU0U4rtWt32+jYCtNr40PSlj/+qIkDQv7SWyd1ygrbNtm276k4WY/iXo1Pijvvtu2fUnDzX4S9Wp8UG7b5j2Qkoabt0bWq9HXKMfH7x2SB7ezbV+SRoXXKFts337oX1yTk+tvZ9u+JKkdjZuPctcuWDswT+vUbGDbviSpfY07o1zLtn1J0lY07oxyPTMzBqMkqTONO6PsxVivkqTR1bgzyv37i8elJXjFK4rnnk1KkjrVuDPKVvv3w6/9Wt1VSJKGWaODEuD22+uuQJI0zBoflJJGj+M7q5sad41yrZ07665AUj+tzt24eu/06vjOYH8FdabxZ5QvfnHdFUjqp9lZx3dWdzU+KM87z2YXaZQsLx/eemkzjQ9K/5KURotzN6rbGh+UUFyjkDQanLtR3TYSQbltm73gpFHh+M7qtkbPR9lq7dyU4+P+8EgaPgsLxeWk5eWiOXluzt9j7XA+yk1E2AtOOly2wgye1VtflpYg8+CtL/7b9FYtQRkRL4qIayLiQERsmPAR8ZyI+LeIuD4izun08zY6aR7WXnDd+gW22X6G4RdluzX2+3sZhmNXpd+/kIf9ePWLt77UJDP7/gU8Fng08ClgeoNttgFfBR4ObAf+GTh5832fksWP9uZfk5M5dN773szx8UO/j/HxYn0399Otz+mldmvs9/cyDMduM5OT/fuZacLx6peI9f9dIuqubPABi9lpZnX6xm58bRKUTwEua1n+beC3N99ne0E5rD+I3foFttl++vmLslPt1tjv72UYjt1m+vkLuQnHq188Vp3bSlDW2pknIj4FvCYz79X7JiJeCDwnM3+5XH4Z8KTMfPU62+4GykGqjjoFfmydT7v7h3DgABy5He7aDzffBN+8o3vfTb+c
csrGr+3du7X97AMeUO6nW5/TS+3W2PPv5f7AN/v4eX3w+F3Fz8pad+2HL13dpQ8pj1sTjle/3H8H3GcKfjQOrssDsLw0nL/P+urRmXlsJ2/s2VivEXE58MB1XprNzEu6+VmZOQ/Ml5+7mLnYUc+mUVccuyWP3WEqjltnvelGmcetM8VxW/a4HaaIuPftEG3qWVBm5mlb3MVNwENblh9SrpMkqW8G+faQzwMnRcTDImI78AvApTXXJEkaMXXdHvL8iLiRosPOxyLisnL9gyLi4wCZ+UPg1cBlwJeBD2TmNW3sfr5HZY8Cj11nPG6d8bh1xuPWmY6PW+NG5pEkqZsGuelVkqTaGZSSJFUY+qDs93B4TRIROyJiT0RcVz4ev8F2bymP8Zcj4k8jItbbblQcxnGbiIhPlsft2oiY6nOpA6Xd41Zue7+IuDEi3tHPGgdRO8ctIp4QEVeVP6dfioiX1FHrINjsd31EHBUR7y9f/4d2fi6HPiiBfwFeAHxmow0iYhvwZ8DPAicDL42Ik/tT3kA7B7giM08CriiXDxERTwWeBjyeYiSHnwSe0c8iB9Cmx610PvDWzHwscCrwjT7VN6jaPW4Av0vFz/SIaee4rQAvz8zHAc8B/jgijutfiYOhzd/1vwR8KzMfCfwR8Aeb7XfogzIzv5yZ/7bJZqcC12fm1zJzP/A+4IzeVzfwzgDOK5+fB5y5zjYJHE0x3u5RwJHAbf0oboBtetzKH84jMnMPQGbemZkra7cbMe38fyMiTgFOAD7Zn7IG3qbHLTO/kpnXlc9vpvij7AH9KnCAtPO7vvV4Xgj8zGatZEMflG16MPD1luUby3Wj7oTMvKV8fivFL6dDZOZVwN8At5Rfl2Xml/tX4kDa9LgBjwK+HREfiogvRMRby792R9mmxy0ixoC3A6/pZ2EDrp3/b/eIiFMp/rD9aq8LG0Dt/K6/Z5vyNsT/AHZW7bRnI/N0Uz+Hw2uaqmPXupCZGRH3ulcoIh5JMdvLQ8pVeyLi6Zn5t10vdoBs9bhR/Gw9HfgJYBl4P3A28O7uVjpYunDcXgV8PDNvHKVL4V04bqv7ORG4ADgrMw90t8rRNRRB6XB4nas6dhFxW0ScmJm3lD9g611Dez7wucy8s3zPX1MMFNHooOzCcbsR+GJmfq18z8XAk2l4UHbhuD0FeHpEvAo4BtgeEXdmZqM74HXhuBER9wM+RnEC8bkelTro2vldv7rNjRFxBPAjwO1VOx2VpleHw1vfpcBZ5fOzgPXOzpeBZ0TEERFxJEVHnlFvem3nuH0eOC4iVq8TPQu4tg+1DbJNj1tmzmTmRGZOUTS/nt/0kGzDpset/L32YYrjdWEfaxs07fyubz2eLwSuzM1G3ul0fq5B+aI447kR+E+KTiaXlesfRNGEs7rdzwFfoWi3n6277kH4omiXvwK4Drgc2FGunwbeVT7fBvw5RTheC/xh3XXX/dXOcSuXTwe+BFwNnAtsr7v2YThuLdufDbyj7rrr/mrz5/QXgbuAL7Z8PaHu2ms6Xvf6XQ+8EXhe+fxo4IPA9cA/Ag/fbJ8OYSdJUoVRaXqVJKkjBqUkSRUMSkmSKhiUkiRVMCglSapgUEp9FBEPiYhLypkgvhoRf1Le70VEnL3RbBkR8fcdft6ZrYNCR8QbI2KrA3i0PWuP1AQGpdQn5cDLHwIuzmImiEdRjD4zt9l7M/OpHX7smRSzKKzu5/WZeXmH+2q16aw9UlMYlFL/PAv4QWa+ByAz7wb+J/CKiBgvt3loRHyqPON8w+obI+LOlue/GRGfL+cd/D8t619ervvniLignCLtecBbI+KLEfGIiDg3Il5Yztn3wZb3PjMiPlo+f3Y5t+E/RcQHI+KYtd9Itjdrj9QIQzHWq9QQjwP2tq7IzO9ExDLwyHLVqRTzfq4An4+Ij2Xm4ur2EfFs4KRyuwAujYifphir8nXAUzPzmxGxIzPviIhLgY9mOaxZy0DjlwPzEXHfzPwe8BLg
fRFx/3I/p2Xm9yLit4BfpxjZRBpJBqU0WPZk5u0AEfEh4KeAxZbXn11+faFcPoYiOH8c+GBmfhMgM++o+pDM/GFEfAJ4bkRcCPwX4LUUY/meDPxdGarbgau6861Jw8mglPrnWopBmO9RzvgwQTHu5BMpJsputXY5gDdl5p+v2c+vdlDP+4BXA3cAi5n53fI66p7MfGkH+5MayWuUUv9cAYxHxMsByomc3w6cm5kr5TanR8SOiLgPRUecv1uzj8sormkeU+7jwRHxo8CVwIsiYme5fke5/XeBYzeo59MU4fxKitAE+BzwtHIeUiLivhHxqC18z9LQMyilPsliBoLnUwTadRQzHPwA+F8tm/0jcBHFrCMXtVyfzHIfnwT+CrgqIq4GLgSOzcxrKHrPfjoi/hn4w/J97wN+MyK+EBGPWFPP3cBHgZ8tH8nMfRSzdvy/iPgSRbPrY9Z+LxHx/Ii4kWL+yI9FxGUdHxhpwDl7iDTgyrPEf8rMybprkUaRZ5TSAIuIB1Gc1b2t7lqkUeUZpSRJFTyjlCSpgkEpSVIFg1KSpAoGpSRJFQxKSZIq/H9/YsaKiEZ7oAAAAABJRU5ErkJggg==",
+ "text/plain": [
+ "
"
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "plot_pareto_front(res_random)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Calculate the volume of the dominated region\n",
+ "\n",
+ "A solution that is not a Pareto solution, i.e., a solution $y$ for which there exists another solution $y'$ that is better than it, is called an inferior solution ($\\exists y'\\; y \\prec y'$). The volume of the inferior solution region, i.e., the subregion of the objective function space occupied by inferior solutions, is one of the indicators of the results of multi-objective optimization. The larger this value is, the more good Pareto solutions are obtained. `res_random.pareto.volume_in_dominance(ref_min, ref_max)` calculates the volume of the inferior solution region in the hyper-rectangle specified by `ref_min` and `ref_max`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-01-05T06:06:14.666649Z",
+ "start_time": "2021-01-05T06:06:14.662809Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.2376881844865093"
+ ]
+ },
+ "execution_count": 17,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "res_random.pareto.volume_in_dominance([-1,-1],[0,0])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Bayesian optimization\n",
+ "\n",
+ "For `bayes_search` in the multi-objective case, `score` can be selected from the following methods\n",
+ "\n",
+ "- HVPI (HyperVolume-based Probability of Improvement)\n",
+ "- EHVI (Expected Hyper-Volume Improvement)\n",
+ "- TS (Thompson Sampling)\n",
+ "\n",
+ "The following 50 evaluations (10 random searches + 40 Bayesian optimizations) will be performed with different scores."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### HVPI (HyperVolume-based Probability of Improvement)\n",
+ "\n",
+ "The improvement probability of a non-dominated region in a multi-dimensional objective function space is obtained as a score. \n",
+ "\n",
+ "- Reference\n",
+ " - Couckuyt, Ivo, Dirk Deschrijver, and Tom Dhaene. \"Fast calculation of multiobjective probability of improvement and expected improvement criteria for Pareto optimization.\" Journal of Global Optimization 60.3 (2014): 575-594."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-01-05T06:06:29.019463Z",
+ "start_time": "2021-01-05T06:06:14.668034Z"
+ },
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "policy = physbo.search.discrete_multi.policy(test_X=test_X, num_objectives=2)\n",
+ "policy.set_seed(0)\n",
+ "\n",
+ "policy.random_search(max_num_probes=10, simulator=simu)\n",
+ "res_HVPI = policy.bayes_search(max_num_probes=40, simulator=simu, score='HVPI', interval=10)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Plotting the Pareto solution\n",
+ "\n",
+ "We can see that more Pareto solutions are obtained compared to random sampling."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-01-05T06:06:29.190434Z",
+ "start_time": "2021-01-05T06:06:29.020967Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAcoAAAG5CAYAAAAOKnSzAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8QVMy6AAAACXBIWXMAAAsTAAALEwEAmpwYAAAkvElEQVR4nO3deZhld13n8fenyQJNwCSAISzVzRKWIIik2EV8MGHQERIURaaU5FHpcRiZeWZExGlGGLAFQdwGeR5LkAQohyUsCYuELAIuQamWzQQhgOkmEELYhNBigHznj3MqXV2punXr1t3v+/U89dx7zj333F+dp+t++nfO7/c9qSokSdL6doy6AZIkjTODUpKkDgxKSZI6MCglSerAoJQkqQODUpKkDgxKaUYlOSnJB5J8M8nLR90eaVwZlFKfJbk6yb8luSHJdUnOTXLcAD7n3CS/vY1d7AG+DNy+qn6tT826WXscTu/3fqVhMyilwXhiVR0HPASYB563lTenMei/z13AlbVB1ZEkRw3486WJYFBKA1RVnwf+EviBJCckeWeS65N8rX1+t5Vtk7wvyb4kfwscAu6Z5H5JLk7y1SSfTPKz7bZ7gAXgOW3P9R3t+vu3+/l6kiuSPGm9diU5Fzh71ftPT/KCJOcneX2SbwDnJLlLkgvbz/90kmes2scLkrwpyWvb07dXJJlvX3sdMAe8o93/c/p/dKXhMCilAUpyd+AngA/T/L29hqYnNwf8G/CKNW/5BZpTorcDrgcuBv4C+H7g54BXJjm1qhaBJeClVXVcVT0xydHAO4D3tts/C1hKct+17aqqc9a8/5L2pTOB84Hj29ffAFwD3AV4CvA7SR63aldParc5Hrhw5fepql8ADtL2rKvqpVs6cNIYMSilwXh7kq8DfwO8H/idqvpKVb2lqg5V1TeBfcBj17zv3Kq6oqq+CzwBuLqqXlNV362qDwNvAX5mg898BHAc8JKqurGqLgPeCTxtC+2+vKreXlU3AXcEHg38RlV9u6o+ArwKePqq7f+mqt5dVd8DXgf84BY+S5oIXoOQBuOsVb00AJLsBP6AJgBPaFffLsmt2qAB+Nyqt+wCHt4G7oqjaAJpPXcBPteG3IoDwF230O7Vn38X4KttqK/e3/yq5S+uen4IuHWSo9qgl6aCQSkNz68B9wUeXlVfTPJgmlOyWbXN6oE1nwPeX1VnbLC/tYNwvgDcPcmOVWE5B3xqC21cvc8vACcmud2qsJwDPt/DvqSJ5alXaXhuR3Nd8utJTgSev8n27wTuk+QXkhzd/jw0yf3b168D7rlq+7+n6dU9p932R4En0lxD3LKq+hzwd8CLk9w6yYOAXwJe3+Uu1rZPmkgGpTQ8fwjchmbu4geB93TauO3FPZ5mEM8XaE5z/i5wbLvJq4FT2xGub6+qG2mC8cfbz3gl8PSq+udttPlpwO72898GPH/tKeUOXgw8r23fs7fRBmmk4o2bJUnamD1KSZI6GGlQJnlCO4n600meu87rxyZ5Y/v63yfZPYJmSpJm2MiCMsmtgD+huZ5yKvC0JKeu2eyXgK9V1b1phtX/7nBbKUmadaPsUT4M+HRVfbYdhPAGmqogq50JnNc+Px/4sSRBkqQhGeU8yrty5OTma4CHb7RNVX03yb8Cd6AZ0Xeztu7lHoDb3va2p93vfvc7/OL+/Ru34LTTem27JGmC7N+//8tVdade3jsVBQfaupeLAPPz87W8vHz4xd274cCBW75p1y5YvZ0kaWolWScIujPKU6+fB+6+avlu3LLix83btLf8+T7gK1v6lH37YOfOI9ft3NmslyRpE6MMyg8BpyS5R5JjaCZVX7hmmwtpbgUEzZ0LLtvo3nkbWliAxcWmB5k0j4uLzXpJkjYxslOv7TXHXwUuAm4F/HlVXZHkhcByVV1IU3nkdUk+DXyVJky3bmHBYJQk9WSk1yir6t3Au9es+61Vz7/NxrcUkiRp4KzMI0lS
BwalJEkdGJSSJHVgUEqS1MHsBuXSUlOMYMeO5nFpadQtkiSNoamozLNlS0uwZw8cOtQsHzjQLIPTSCRJR5jNHuXevYdDcsWhQ816SZJWmc2gPHhwa+slSTNrNoNybm5r6yVJM2s2g9JC6ZKkLs1mUFooXZLUpdkc9QoWSpckdWU2e5SSJHXJoJQkqQODUpKkDgxKSZI6MCglSerAoNyIRdMlSczy9JBOLJouSWrZo1yPRdMlSS2Dcj0WTZcktQzK9Vg0XZLUMijXY9F0SVLLoFyPRdMlSS1HvW7EoumSJOxRSpLUkUEpSVIHBqUkSR0YlJIkdWBQSpLUgUEpSVIHBqUkSR0YlJIkdWBQSpLUgUEpSVIHBuVWLS3B7t2wY0fzuLQ06hZJkgbIWq9bsbQEe/YcvqnzgQPNMlgXVpKmlD3Krdi793BIrjh0qFkvSZpKBuVWHDy4tfWSpIlnUG7F3NzW1kuSJp5BuRX79sHOnUeu27mzWS9JmkoG5VYsLMDiIuzaBUnzuLjoQB5JmmKOet2qhQWDUZJmiD1KSZI6MCjXsJ6AJGk1T72uYj0BSdJa9ihXsZ6AJGktg3IV6wlIktYyKFexnoAkaS2DchXrCUiS1jIoV7GegCRpLUe9rmE9AUnSavYoJUnqwKCUJKkDg1KSpA4MSkmSOjAoJUnqwKDsN6uqS9JUcXpIP1lVXZKmjj3KfrKquiRNHYOyn6yqLklTx6DsJ6uqS9LUMSj7yarqkjR1DMp+sqq6JE0dR732m1XVJWmq2KOUJKkDg1KSpA5GEpRJTkxycZKr2scT1tnmwUkuT3JFko8leeoo2ipJmm2j6lE+F7i0qk4BLm2X1zoEPL2qHgA8AfjDJMcPr4mSJI0uKM8EzmufnwectXaDqvpUVV3VPv8C8CXgTsNqoCRJMLqgPKmqrm2ffxE4qdPGSR4GHAN8ZoPX9yRZTrJ8/fXX97elkqSZNrDpIUkuAe68zktHFD6tqkpSHfZzMvA64Oyqumm9bapqEVgEmJ+f33BfkiRt1cCCsqpO3+i1JNclObmqrm2D8EsbbHd74F3A3qr64ICaKknShkZ16vVC4Oz2+dnABWs3SHIM8DbgtVV1/hDbJknSzUYVlC8BzkhyFXB6u0yS+SSvarf5WeBHgHOSfKT9efBIWitJmlmpmq5LevPz87W8vDzqZkiSxkiS/VU138t7rcwjSVIHBqUkSR0YlJIkdWBQSpLUgUEpSVIHBuWoLC3B7t2wY0fzuLQ06hZJktYxsMo86mBpCfbsgUOHmuUDB5plgIWF0bVLknQL9ihHYe/ewyG54tChZr0kaawYlKNw8ODW1kuSRsagHIW5ua2tlySNjEE5Cvv2wc6dR67bubNZL0kaKwblKCwswOIi7NoFSfO4uOhAHkkaQ456HZWFBYNRkiaAPUpJkjowKCVJ6sCglCSpA4NSkqQODEpJkjowKCVJ6sCglCSpA4NSkqQODEpJkjowKCVJ6sCgHFdLS7B7N+zY0TwuLY26RZI0k6z1Oo6WlmDPnsM3dz5woFkG68NK0pDZoxxHe/ceDskVhw416yVJQ2VQbsPAzo4ePLi19ZKkgTEoe7RydvTAAag6fHa0L2E5N7e19ZKkgTEoezTQs6P79sHOnUeu27mzWS9JGiqDskcDPTu6sACLi7BrFyTN4+KiA3kkaQQc9dqjubnmdOt66/tiYcFglKQxYI+yR54dlaTZYFD2aOhnRy1AIEkj4anXbRja2VELEEjSyNijnAQWIJCkkTEoJ4EFCCRpZAzKSWABAkkaGYNyEjjEVpJGxqCcBBYgkKSRcdTrpLAAgSSNhD1KSZI6MCglSerAoJQkqQODUpKkDgxKSZI6MCglSerAoJQkqQODUpKkDgxKSZI6MCglSerAoJwmS0uwezfs2NE8Li2NukWSNPGs9TotlpZgz57DN3g+cKBZBmvEStI22KOcFnv3Hg7JFYcONeslST0zKKfFwYNbWy9J6opBOS3m
5ra2XpLUFYNyWuzbBzt3Hrlu585mvSSpZwbltFhYgMVF2LULkuZxcdGBPJK0TY56nSYLCwajJPWZPUpJkjowKKedRQgkaVs89TrNLEIgSdtmj3KaWYRAkrbNoJxmFiGQpG0zKKeZRQgkadsMymlmEQJJ2jaDcppZhECSts1Rr9POIgSStC0j6VEmOTHJxUmuah9P6LDt7ZNck+QVw2yjJEkwulOvzwUurapTgEvb5Y28CPjAUFolSdIaowrKM4Hz2ufnAWett1GS04CTgPcOp1mSJB1pVEF5UlVd2z7/Ik0YHiHJDuDlwLM321mSPUmWkyxff/31/W2pJGmmDWwwT5JLgDuv89IRZWGqqpLUOts9E3h3VV2TpONnVdUisAgwPz+/3r4kSerJwIKyqk7f6LUk1yU5uaquTXIy8KV1Nnsk8JgkzwSOA45JckNVdbqeKUlSX41qesiFwNnAS9rHC9ZuUFU3z2lIcg4wb0hKkoZtVNcoXwKckeQq4PR2mSTzSV41ojZJknQLqZquS3rz8/O1vLw86mZIksZIkv1VNd/Ley1hJ0lSBwalJEkdGJQ6bGkJdu+GHTuax6WlUbdIkkbOouhqLC3Bnj1w6FCzfOBAswwWVZc00+xRqrF37+GQXHHoULNekmaYQanGwYNbWy9JM8KgVGNubmvrJWlGbBiU7X0gX5zkdUn+05rXXjn4pmmo9u2DnTuPXLdzZ7NekmZYpx7la4AAbwF+LslbkhzbvvaIgbdMw7WwAIuLsGsXJM3j4qIDeSTNvE6jXu9VVT/dPn97kr3AZUmeNIR2aRQWFgxGSVqjU1Aem2RHVd0EUFX7knwe+ADN3TwkSZp6nU69vgN43OoVVXUu8GvAjQNsk8aNhQgkzbANe5RV9ZwN1r8HOGVgLdJ4sRCBpBnn9BB1ZiECSTPOoFRnFiKQNOMMSnVmIQJJM27ToEyyM8n/TvJn7fIpSX5y8E3TWLAQgaQZ102P8jXAvwOPbJc/D/z2wFrUJw7U7BMLEUiacd3cZuteVfXUJE8DqKpDSTLgdm2LAzX7zEIEkmZYNz3KG5PcBiiAJPei6WGOLQdqSpL6pZugfAHwHuDuSZaAS4F151iOCwdqDonntyXNgE1PvVbVe5PspymEHuC/V9WXB96ybZiba063rrdefeL5bUkzoptRr+8AHg+8r6reOe4hCQ7UHArPb0uaEd2cev094DHAlUnOT/KUJLcecLu2xYGaQ+D5bUkzIlXV3YbJrWiKpD8DeEJV3X6QDevV/Px8LS8vj7oZ02/37vXPb+/aBVdfPezWSFJHSfZX1Xwv7+2qMk876vWngV8BHgqc18uHaYp4flvSjOjmGuWbgE/Q9CZfQTOv8lmDbpjGnOe3Jc2IbgoOvBp4WlV9b9CN0YSxEIGkGbBhUCZ5XFVdBtwWOHNtMZ6qeuuA2yZJ0sh1OvX62Pbxiev8WBRdnVmMQNKU2LBHWVXPb5++sKr+ZfVrSe4x0FZpslmMQNIU6WbU61vWWXd+vxuiKWIxAklTpNM1yvsBDwC+L8lPrXrp9sBYFxzQiFmMQNIU6TTq9b401yKPp7kuueKbNEUHpPVZbFfSFOl0jfIC4IIkj6yqy4fYJk26ffuOvEYJFiOQNLG6uUb5K0mOX1lIckKSPx9ckzTxLEYgaYp0U3DgQVX19ZWFqvpakh8aXJM0FSxGIGlKdNOj3JHkhJWFJCfSXcBKkjTxugnKlwOXJ3lRkhcBfwe8dLDN0lSzGIGkCbJpz7CqXptkmaYoOsBPVdWVg22WppbFCCRNmK5uswWcCHyrql4BXG9lHvXMYgSSJkw3t9l6PvAbwG+2q44GXj/IRmmKWYxA0oTppkf5ZOBJwLcAquoLwO0G2ShNsY2KDliMQNKY6iYob6yqAgogyW0H2yRNtX37muIDq1mMQNIY6yYo35TkT4HjkzwDuAT4s8E2S1PLYgSSJkyazuImGyVnAI8HAlxUVRcPumG9mp+fr+Xl5VE3
Q5I0RpLsr6r5Xt7bVeGANhjHNhwlSRqUDU+9Jvmb9vGbSb6xzs+/JHnm8JoqSdLwdbp7yA+3j+uOcE1yB5oqPa8cTNMkSRq9rgoOJHlIkv+W5FkrBdGr6ivAjw6ycZohlrWTNKa6KTjwW8B5wB2AOwLnJnkeQFVdO9jmaSaslLU7cACqDpe1MywljYFNR70m+STwg1X17Xb5NsBHquq+Q2jfljnqdQLt3t2E41q7dsHVVw+7NZKm0HZGvXZz6vULwK1XLR8LfL6XD5PWZVk7SWNsw8E8Sf4vTTWefwWuSHJxu3wG8A/DaZ5mwtzc+j1Ky9pJGgOd5lGunL/cD7xt1fr3Daw1mk379h156y2wrJ2ksbHhqdeqOq+qzgPeSBOW+4E3rlov9Ue3Ze0cGStpBDqdej0K+B3gF4EDNOXr7p7kNcDeqvrOcJqombCw0Lneqzd8ljQinQbzvIzmhs33qKrTquohwL2A44HfG0LbpMO84bOkEekUlD8JPKOqvrmyoqq+AfwX4CcG3TDpCI6MlTQinYKyap1JllX1Pdp7U0pD4w2fJY1Ip6C8MsnT165M8vPAPw+uSdI6vOGzpBHpND3kvwJvTfKLNCNeAeaB2wBPHnTDpCOsDNjZu7c53To314SkA3kkDVg3JeweBzygXbyyqi4deKu2wRJ2kqS1Bnrj5qq6DLisl51LkjTpurrNljT2LEYgaUA27VFKY89iBJIGyB6lJp/FCCQNkEGpyWcxAkkDNJKgTHJikouTXNU+nrDBdnNJ3pvkE0muTLJ7yE3VJLAYgaQBGlWP8rnApVV1CnBpu7ye1wIvq6r7Aw8DvjSk9mmSWIxA0gCNKijPBFZu1XUecNbaDZKcChxVVRcDVNUNVXVo7XZS17fpkqQebFpwYCAfmny9qo5vnwf42sryqm3OAn4ZuBG4B3AJ8Ny21uza/e0B9gDMzc2dduDAgUE2X5I0YbZTcGBgPcoklyT5p3V+zly9XVt4fb20Pgp4DPBs4KHAPYFz1vusqlqsqvmqmr/Tne7U319E08F5lpJ6NLB5lFV1+kavJbkuyclVdW2Sk1n/2uM1wEeq6rPte94OPAJ49SDaqynmPEtJ2zCqa5QXAme3z88GLlhnmw8BxydZ6SI+DrhyCG3TtHGepaRtGFVQvgQ4I8lVwOntMknmk7wKbr7v5bOBS5N8HAjwZyNqryaZ8ywlbcNIBvMM0rHHztd3vrPsXZh02O7dzenWtXbtgquvHnZrJI3AWA7mGZUbb4Sqw5ehHLMh51lK2o6pC8rVvAwlwHmWkrZl6k69JvMFy6uW4aabRtggSdLIeeq1A8t9qivOs5S0gam+H6WXodQV51lK6mDqTr066lVb5qhYaept59Tr1PUoH/hAWF7efDvpZs6zlNTB1F+jlDbl/SwldWBQSs6zlNSBQSk5z1JSB1N3jVLqycKCwShpXfYopW44z1KaWfYopc04z1KaafYopc14P0tpphmU0macZynNNINS2ozzLKWZZlBKm3GepTTTDEppM86zlGaaQSl1Y2GhKZB+003N43oh6RQSaSo5PUTqB6eQSFPLHqXUD04hkaaWQSn1g1NIpKllUEr94BQSaWoZlFI/OIVEmloGpdQPTiGRppajXqV+8VZd0lSyRykNg3MspYllj1IaNOdYShPNHqU0aM6xlCaaQSkNmnMspYlmUEqD5hxLaaIZlNKgOcdSmmgGpTRozrGUJppBKQ1DN7fpAqeRSGPIoNwGv9PUVyvTSA4cgKrD00j8hyWNlEHZI7/T1HdOI5HGkkHZI7/T1HdOI5HGkkHZI7/T1HdOI5HGkkHZI7/T1HdOI5HGkkHZI7/T1HfdTiNxFJk0VBZF79HKd9fevc3p1rm5JiSdGqdt2exWXRZYl4YuVTXqNvTV/Px8LS8vj7oZ0mDs3t2E41q7djXzMyWtK8n+qprv5b2eepUmiaPIpKEzKKVJ4igyaegMSmmSOIpMGjqDUpokFliXhs6g
lCZNNwXWnUIi9Y3TQ6Rp4xQSqa/sUUrTxkLEUl8ZlNK0cQqJ1FcGpTRtnEIi9ZVBKU0bp5BIfWVQStNmK1NIHB0rbcpRr9I02qy4Ojg6VuqSPUppVjk6VuqKQSnNKkfHSl0xKKVZ5ehYqSsGpTSrHB0rdcWglGaVBdalrjjqVZpl3YyOlWacPUpJkjowKCVJ6sCglCSpA4NSkqQODEpJ22fNWE0xR71K2h5rxmrK2aOUtD3WjNWUG0lQJjkxycVJrmofT9hgu5cmuSLJJ5L8cZIMu62SNmHNWE25UfUonwtcWlWnAJe2y0dI8ijg0cCDgB8AHgo8dpiNlNQFa8Zqyo0qKM8Ezmufnwectc42BdwaOAY4FjgauG4YjZO0BVupGeugH02gUQXlSVV1bfv8i8BJazeoqsuBvwKubX8uqqpPrLezJHuSLCdZvv766wfVZknr6bZm7MqgnwMHoOrwoB/DUmMuVTWYHSeXAHde56W9wHlVdfyqbb9WVUdcp0xyb+CPgKe2qy4GnlNVf93pc+fn52t5eXk7TZc0CLt3N+G41q5dcPXVw26NZkyS/VU138t7BzY9pKpO3+i1JNclObmqrk1yMvCldTZ7MvDBqrqhfc9fAo8EOgalpDHloB9NqFGder0QOLt9fjZwwTrbHAQem+SoJEfTDORZ99SrpAngoB9NqFEF5UuAM5JcBZzeLpNkPsmr2m3OBz4DfBz4KPDRqnrHKBorqQ+8UbQm1Egq81TVV4AfW2f9MvDL7fPvAf95yE2TNCgrg3v27m1Ot87NNSFp9R6NOSvzSBqehYVm4M5NNzWPCwtOGdHYs9arpNGxTqwmgD1KSaNjnVhNAINS0ug4ZUQTwKCUNDrbmTLitU0NiUEpaXR6nTJiOTwNkUEpaXS6rRO7ltc2NUQDq/U6KtZ6lWbAjh1NT3KtpJl6Iq2xnVqv9iglTR7L4WmIDEpJk8dyeBoig1LS5On12qbUAyvzSJpMCwsGo4bCHqUkSR0YlJJk8QJ14KlXSbPNwuzahD1KSbPN4gXahEEpabZZmF2bMCglzTaLF2gTBqWk2WbxAm3CoJQ02yxeoE046lWSLF6gDuxRSlK/OB9zKtmjlKR+cD7m1LJHKUn94HzMqWVQSlI/OB9zahmUktQPzsecWgalJPWD8zGnlkEpSf3gfMypZVBKUr8sLMDVV8NNNzWPWw1Jp5eMJaeHSNI4cHrJ2LJHKUnjwOklY8uglKRx4PSSsWVQStI4cHrJ2DIoJWkcOL1kbBmUkjQO+jm9xNGzfeWoV0kaF/243ZejZ/vOHqUkTRNHz/adQSlJ08TRs31nUErSNHH0bN8ZlJI0TRw923cGpSRNE4uz952jXiVp2vRj9KxuZo9SktSdGZ2faY9SkrS5GZ6faY9SkrS5GZ6faVBKkjY3w/MzDUpJ0uZmeH6mQSlJ2twMz880KCVJm+v3/MwJGkHrqFdJUnf6NT9zwkbQ2qOUJA3XhI2gNSglScM1YSNoDUpJ0nBN2Ahag1KSNFwTNoLWoJQkDVcvI2hHOEo2VTW0DxuGY4+dr+98Z5m5ueY/J2M4gEqStBVrR8lC0wPdwvSUJPurar6Xj5+6oEzmC5aBLR9HSdI42r27mUKy1q5dcPXVXe3CoFxldVDClo6jJGkc7dgB62VVAjfd1NUuthOUU3+NckxHG0uSujXiUbJTH5RjOtpYktStXkfJrhoA9CB4YK8fP9VBOcajjSVJ3ep1lOyePc21zSqOhmN6/fipu0bpqFdJ0toBQPPAclV62dXUFUV/4ANheXnz7SRJU6yPA1Sm+tSrJGlG9XGAikEpSZo+6w0A6pFBKUmaPmsGAH0Hbux1V1MXlB//+ETcMFsaexN0A3ppfQsLTcWZm27iY/DxXnczkqBM8jNJrkhyU5INKyUkeUKSTyb5dJLndrPvG29sCjis3DDbP+6NTcMX4bj+DuParm6tGVnv35NmW1UN/Qe4P3Bf4H3A/Abb3Ar4DHBP
mvkvHwVO3Xzfp1Xzp9387NpVWsfrX1+1c2cdcax27mzWT4px/R3GtV1bsWvXke3370mTDliuHjNrpPMok7wPeHZV3WJCR5JHAi+oqv/QLv8mQFW9uPM+j6z1uoVSgDOlDzWGR25cf4dxbddW9KG0pjRWtlPrdZznUd4V+Nyq5WuAh6+3YZI9wJ5m6ViaqaWNqu/cmHys53PT0+u002657noOHLgTyf79w29PL9b7HZqQGvLvcEfgy4cXx6Zd2/CgB8LRt6hk0ue/pzXHTV3yuPXmvr2+cWBBmeQS4M7rvLS3qi7o52dV1SKw2H7uctVyT/9rmHXNsTvgsdui5rj19j/VWeZx643HrTdJei5FM7CgrKrTt7mLzwN3X7V8t3adJElDM87TQz4EnJLkHkmOAX4OuHDEbZIkzZhRTQ95cpJrgEcC70pyUbv+LkneDVBV3wV+FbgI+ATwpqq6oovdLw6o2bPAY9cbj1tvPG698bj1pufjNnV3D5EkqZ/G+dSrJEkjZ1BKktTBxAflIMvhTbskJya5OMlV7eMJG2z30vYYfyLJHyfp6ean02ILx20uyXvb43Zlkt1DbupY6fa4tdvePsk1SV4xzDaOo26OW5IHJ7m8/Tv9WJKnjqKt42Cz7/okxyZ5Y/v633fzdznxQQn8E/BTwAc22iDJrYA/AX4cOBV4WpJTh9O8sfZc4NKqOgW4tF0+QpJHAY8GHgT8APBQ4LHDbOQY2vS4tV4LvKyq7g88DPjSkNo3rro9bgAvosPf9Izp5rgdAp5eVQ8AngD8YZLjh9fE8dDld/0vAV+rqnsDfwD87mb7nfigrKpPVNUnN9nsYcCnq+qzVXUj8AbgzMG3buydCZzXPj8POGudbQq4NU293WOBo4HrhtG4MbbpcWv/OI+qqosBquqGqjo0tBaOp27+vZHkNOAk4L3DadbY2/S4VdWnquqq9vkXaP5TdqdhNXCMdPNdv/p4ng/82GZnySY+KLu0Xjm8u46oLePkpKq6tn3+RZovpyNU1eXAXwHXtj8XVdUnhtfEsbTpcQPuA3w9yVuTfDjJy9r/7c6yTY9bkh3Ay4FnD7NhY66bf283S/Iwmv/YfmbQDRtD3XzX37xNOw3xX4E7dNrpONd6vdkwy+FNm07HbvVCVVWSW8wVSnJvmru93K1ddXGSx1TVX/e9sWNku8eN5m/rMcAPAQeBNwLnAK/ub0vHSx+O2zOBd1fVNbN0KbwPx21lPycDrwPOrirL1/fJRASl5fB61+nYJbkuyclVdW37B7beNbQnAx+sqhva9/wlTaGIqQ7KPhy3a4CPVNVn2/e8HXgEUx6UfThujwQek+SZwHHAMUluqKqpHoDXh+NGktsD76LpQHxwQE0dd918169sc02So4DvA77SaaezcurVcnjruxA4u31+NrBe7/wg8NgkRyU5mmYgz6yfeu3muH0IOD7JynWixwFXDqFt42zT41ZVC1U1V1W7aU6/vnbaQ7ILmx639nvtbTTH6/whtm3cdPNdv/p4PgW4rDarvNPrjSzH5Yemx3MN8O80g0wuatffheYUzsp2PwF8iua8/d5Rt3scfmjOy18KXAVcApzYrp8HXtU+vxXwpzTheCXw+6Nu96h/ujlu7fIZwMeAjwPnAseMuu2TcNxWbX8O8IpRt3vUP13+nf488B3gI6t+Hjzqto/oeN3iux54IfCk9vmtgTcDnwb+AbjnZvu0hJ0kSR3MyqlXSZJ6YlBKktSBQSlJUgcGpSRJHRiUkiR1YFBKQ5TkbkkuaO8E8Zkkf9TO9yLJORvdLSPJ3/X4eWetLgqd5IVJtlvAo+u79kjTwKCUhqQtvPxW4O3V3AniPjTVZ/Zt9t6qelSPH3sWzV0UVvbzW1V1SY/7Wm3Tu/ZI08KglIbnccC3q+o1AFX1PeB/AL+YZGe7zd2TvK/tcT5/5Y1Jblj1/NeTfKi97+D/WbX+6e26jyZ5XXuLtCcBL0vykST3SnJukqe09+x7
86r3/miSd7bPH9/e2/Afk7w5yXFrf5Hq7q490lSYiFqv0pR4ALB/9Yqq+kaSg8C921UPo7nv5yHgQ0neVVXLK9sneTxwSrtdgAuT/AhNrcrnAY+qqi8nObGqvprkQuCd1ZY1W1Vo/BJgMcltq+pbwFOBNyS5Y7uf06vqW0l+A/ifNJVNpJlkUErj5eKq+gpAkrcCPwwsr3r98e3Ph9vl42iC8weBN1fVlwGq6qudPqSqvpvkPcATk5wP/EfgOTS1fE8F/rYN1WOAy/vzq0mTyaCUhudKmiLMN2vv+DBHU3fyITQ3yl5t7XKAF1fVn67Zz7N6aM8bgF8FvgosV9U32+uoF1fV03rYnzSVvEYpDc+lwM4kTwdob+T8cuDcqjrUbnNGkhOT3IZmIM7frtnHRTTXNI9r93HXJN8PXAb8TJI7tOtPbLf/JnC7DdrzfppwfgZNaAJ8EHh0ex9Sktw2yX228TtLE8+glIakmjsQPJkm0K6iucPBt4H/tWqzfwDeQnPXkbesuj5Z7T7eC/wFcHmSjwPnA7erqitoRs++P8lHgd9v3/cG4NeTfDjJvda053vAO4Efbx+pqutp7trx/5J8jOa06/3W/i5JnpzkGpr7R74ryUU9HxhpzHn3EGnMtb3Ef6yqXaNuizSL7FFKYyzJXWh6db836rZIs8oepSRJHdijlCSpA4NSkqQODEpJkjowKCVJ6sCglCSpg/8PSC/HP/JR9eEAAAAASUVORK5CYII=",
+ "text/plain": [
+ "
"
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "best_fx, best_action = res.export_all_sequence_best_fx()\n",
+ "plt.plot(best_fx)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "With `res.export_sequence_best_fx()`, you can get the best value obtained at each step and the history of the action. \n",
+ "\n",
+ "The difference between `res.export_all_sequence_best_fx()` and `res.export_sequence_best_fx()` is that the information is not for each evaluation of the simulator, but for each search step. In this case, the total number of steps is 10, and the number of evaluations is 100."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:20:24.535770Z",
+ "start_time": "2020-12-04T06:20:24.418352Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[]"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYcAAAD3CAYAAAD2S5gLAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/Il7ecAAAACXBIWXMAAAsTAAALEwEAmpwYAAAdSklEQVR4nO3dfZBV9Z3n8fcHmgclICE2DyqiBjQKEpGbSFKDEJJAxo2aYZKyyjhkqkJwMvsQluyktuIuNVXj1uxGnThumGxIJVvBnR3GmGyJyyLBiXSoKMTGiLcJ8SHMoN0KaVAuLbY89Xf/6HPx0t2X23374dy+9/OqulXn/s7vnPM7F+p++pzvPecoIjAzMys0Iu0BmJlZ5XE4mJlZNw4HMzPrxuFgZmbdOBzMzKyburQHMBAuvvjiuOKKK9IehpnZsLJ79+7DEVHf07yqCIcrrriCxsbGtIdhZjasSDpQbJ5PK5mZWTcOBzMz66ascJC0RtJOSWtK9HtI0g5Jawva7pLUIGmTpAuKra+32zAzs4HX53CQNB2YERELgCWSZhbpNw8gIhYCcyRNlzQWuBX4BLA8Itp7Wl9vt2FmZoOjnCOHpcBGSfOBycDCIv1mAXuS6aeAucBNQDuwFcgfTfS0vt5uw8zMBkHJcJC0QtKT+RcwFThJ55f7KmBKkUX3AYuS6QXABGAaMA5YBlwp6QY6v/y7rq+ntq7jWiWpUVJja2tr7/bWzMx6peRPWSNiA7Ah/17SSmAdsJLOL/rDRZbLStovaStwBMgBI4GGiOiQ1ABcA7QWWd95txER64H1AJlMxreWNTMbQOVc57AN+ExENEn6bxQERw/ujYjTkv4e2AWMAb4CfAeYDTxMZ3B0Xd/bfdiGmQ2yN4+f5B9+9SonTp1JeyjWxdVTx/PZuZcM+Hr7HA4RcSD5FdEu4PGI2AsgaSLwlxGxOnk/Hng0KUJ/PyKOJO3PSnoaeC4inkvaelpftzYzG3ovHWrjyz96ltfebEdKezTW1WfnXjIo4aBqeNhPJpMJXyFtNvB+/ttD/Lt/eJ4LRo/k+ysy3DB9YtpDsgEkaXdEZHqaVxW3zzCzgRURfH/Hfv56y2+ZfckEvr8iw7SLLkh7WDaEHA5mdo4Tp8/wn/5PEz/e3cy/un4a93/hw1wwemTaw7Ih5nAws7MOv32CP3t4N40H3uJrn5zF1z45ixEjXGioRQ4HMwNg3xvHWPmjRo4cP8F37pw3KEVOGz4cDmbGz/YeZPU/Ps/4sXX8+O6Pc/1lF6U9JEuZw8GshkUE3234HfdtfZG5l17E+hUZpkwYm/awrAI4HMxq1LunzvDNn2b56a9buPXDl3Df5+cydpQLz9bJ4WBWg37f9i53P7ybX796lK9/+mr+zZKZyFe4WQGHg1mN2ft6jq/8qJG33jnFd794I394/bS0h2QVyOFgVkOeaHqDf/+Pe3j/haP48Z99jDmXuvBsPXM4mNWAiOA7P3+FB7a9xLzLJ/K9P5nP5PEuPFtxDgezKvfuqTN849EX2LTndf5o3qX89fLrXXi2khwOZlXs0LF3WbWhkRdacnzjM9fw1UUfdOHZesXhYFalss05vrKhkWPvnuJ7d81n6eypaQ/JhhGHg1kV2vzCG3z9x8/zgXFj+MlXP8610yakPSQbZhwOZlUkIvjbf3qZB598mcyM9/M//mQ+F79vTNrDsmHI4WBWJdpPnuE/PLqHzS+8wefnX8Z/+aM5jKlz4dnKM6KchSStSR7juaZEv4ck7ZC0tqDtLkkNkjZJuqCgX4OkHxb0Oyhpe/KaX844zWrFG7l2vvC9p/l/2Te455Zrue/zcx0M1i99DgdJ04EZEbEAWCJpZpF+8wAiYiEwR9L05HnStwKfAJZHRLukccCjEbEIOCRpYbKKJyJicfLaXca+mdWE5187yu3f+SX/cvgdfvClDF+5+Sr/Isn6rZwjh6XAxuSv+cnAwiL9ZgF7kumngLnA
TUA7sBVYCxARxyPiF0m/N4ETyfTNyVHHA/L/dLMePfZ8C3d87xnGjBrBT//84yz50JS0h2RVomQ4SFoh6cn8C5gKnKTzy30VUOx/4z5gUTK9AJgATAPGAcuAKyXdULCdWcCNEfGrpGl1ctRRB9zSw7hWSWqU1Nja2lp6T82qSEdH8MDPXuRrG5/nw9Mn8ti//gOunjI+7WFZFSkZDhGxISI+lX8Bh4B1wD3AGOBwkeWywH5JW4FRQA44DjRERAfQAFwDIOkS4FvA3QXLb0omNwOze1j/+ojIRESmvr6+t/trNuy9c/I0f/73z/Hff/4Kd2Sm87++fBOTxo1Oe1hWZco5rbQNaI6IJmA58Mx5+t4bEcuAAHYBu+k8LQWdX/gvJ9MPAHdHxDEASZMkLUnmZYBXyhinWdV5/Wg7n//uM/zsNwf5z5+9jv/6x9czuq6s35WYnVef/1dFxAFgp6RdQFtE7AWQNFHSg/l+ksYDmyU1AFsi4khEvA48K+lpYFREPCfpo8Bi4JHkl0m3AceAO5NlrwUe699umg1/uw+8xW3f+SWvvfkOP/zTj/DlP7jShWcbNIqItMfQb5lMJhobG9Mehtmg+elzzfzHn2SZNnEsP/hShpmTXV+w/pO0OyIyPc3zRXB21sHcuzy57xDD/8+F6vLSwTYe3nmAj131Af7uizfyftcXbAg4HOysv9n2Io80Nqc9DOvBF2+6nL+8bTajRrq+YEPD4WBnvdCc4w9mXsy377gh7aFYgVEjxcQLfbRgQ8vhYEDnA2Fe/v3bfPq6KdSP943azGqdj1ENgN+8cYwzHeFnCpsZ4HCwRFNLDoDrHQ5mhsPBEtnmHB8YN5ppF/mh82bmcLBEtiXHnEsv8kVVZgY4HIz3itE+pWRmeQ4HY5+L0WbWhcPB3itGX+ZwMLNODgcj25Jj0rjRXOJitJklHA5GtuWYi9Fmdg6HQ41799QZXj7UxlzXG8ysgMOhxv32YBunXYw2sy4cDjUu62K0mfXA4VDjss1HXYw2s27KCgdJayTtlLSmRL+HJO2QtLag7S5JDZI2SbogaTuYPCJ0u6T5fdmG9Y+L0WbWkz6Hg6TpwIyIWAAskTSzSL95ABGxEJgjabqkscCtwCeA5RHRnnR/IiIWJ6/dvd2G9U++GH39pRPSHoqZVZhyjhyWAhuTv/AnAwuL9JsF7EmmnwLmAjcB7cBWYG1B35uTI4wH1PknbG+3Yf2QL0b7thlm1lXJcJC0QtKT+RcwFThJ55f7KmBKkUX3AYuS6QXABGAaMA5YBlwp6YZk/urkCKMOuIXOQDjvNiStktQoqbG1tbU3+2pd5IvR/qWSmXVVMhwiYkNEfCr/Ag4B64B7gDHA4SLLZYH9krYCo4AccBxoiIgOoAG4Jum7KVlsMzAbaC21jYhYHxGZiMjU19f3YZctr6k5x/svHMWlEy9IeyhmVmHKOa20DWiOiCZgOfDMefreGxHLgAB2AbvpPGUEnSHwsqRJkpYkbRnglT5uw8rk23SbWTF9DoeIOADslLQLaIuIvQCSJkp6MN9P0nhgs6QGYEtEHImI14FnJT0NjIqI54BjwJ1Jv2uBx4ptwwbOu6fO8NKhNtcbzKxHioi0x9BvmUwmGhsb0x7GsLLntaPcvu6XfPeLN/KH109LezhmlgJJuyMi09M8XwRXo1yMNrPzcTjUqKaWHBMvHMVl73cx2sy6czjUqGxLjutdjDazIhwONejE6c5itE8pmVkxDoca9OLBNk6d8ZXRZlacw6EGnb1Nt8PBzIpwONSgppYcF13gYrSZFedwqEEuRptZKQ6HGnPi9BlePNjmJ7+Z2Xk5HGqMi9Fm1hsOhxrjYrSZ9YbDoca4GG1mveFwqDEuRptZbzgcaki+GO0ro82sFIdDDXnp4NsuRptZrzgcaoiL0WbWWw6HGpJNitHTJ7kYbWbnV1Y4SFojaaekNSX6PSRph6S1BW13SWqQtEnSBZIul7Q9eb0m6XNJv4MF
7fPLGaedq6klx5xLJ7gYbWYl9TkcJE0HZkTEAmCJpJlF+s0DiIiFwBxJ0yWNBW4FPgEsj4j2iHg1IhZHxGKgCdiarOKJfHtE7O77rlmhk6c7XIw2s14r58hhKbAx+Wt+MrCwSL9ZwJ5k+ilgLnAT0E5nAKwt7CzpKqAlItqTppuTo44H5D91++2lQ22cPNPheoOZ9UrJcJC0QtKT+RcwFThJ55f7KmBKkUX3AYuS6QXABGAaMA5YBlwp6YaC/ncAGwver06OOuqAW3oY1ypJjZIaW1tbS+1GzXMx2sz6omQ4RMSGiPhU/gUcAtYB9wBjgMNFlssC+yVtBUYBOeA40BARHUADcE3BIkuB7QXLb0omNwOze1j/+ojIRESmvr6+5I7WumxLjglj67h80oVpD8XMhoFyTittA5ojoglYDjxznr73RsQyIIBdwG46QwA6v/BfhrOnlA5GxOnk/SRJS5J+GeCVMsZpBTqL0b4y2sx6p8/hEBEHgJ2SdgFtEbEXQNJESQ/m+0kaD2yW1ABsiYgjEfE68Kykp4FREfFc0v124PGCzRwD7kyWvRZ4rIx9s8TJ0x389o02n1Iys16rK2ehiLgfuL9L21FgdcH7NjprC12X/Svgr7q0fbvL+9PAynLGZt3li9H+pZKZ9ZYvgqsBTS5Gm1kfORxqQLYlx/ixdcz4gIvRZtY7DocakG3JMecSF6PNrPccDlXubDHaz4w2sz5wOFQ5XxltZuVwOFQ5F6PNrBwOhyrnYrSZlcPhUOWaXIw2szI4HKrYqTMd7DvoYrSZ9Z3DoYq9dKiNk6d9ZbSZ9Z3DoYq5GG1m5XI4VLFsS47xY+qY4dt0m1kfORyqWLblGLMvncCIES5Gm1nfOByq1KkzHex745hPKZlZWRwOVerlQ2+7GG1mZXM4VCkXo82sPxwOVSrbkuN9Y+q44gPj0h6KmQ1DDocqlW3JMfsSF6PNrDxlhYOkNZJ2SlpTot9DknZIWpu8v1zS9uT1mqTPFVtfb7dh3Z12MdrM+qnP4SBpOjAjIhYASyTNLNJvHkBELATmSJoeEa9GxOKIWAw0AVt7Wl9vt2E9e/n3b3PidIdvm2FmZSvnyGEpsFHSfGAysLBIv1nAnmT6KWBufoakq4CWiGgvsr6S25C0SlKjpMbW1tYydqN6ZZNitH+pZGblKhkOklZIejL/AqYCJ4G1wCpgSpFF9wGLkukFwISCeXcAG5PpyT2sr6e2c0TE+ojIRESmvr6+1G7UlGxzZzH6ShejzaxMdaU6RMQGYEP+vaSVwDpgJTAOOFxkuayk/ZK2AkeAXMHspcB9yXRrkfWV3Ib1LNuS4zoXo82sH8o5rbQNaI6IJmA58Mx5+t4bEcuAAHbB2VNKByPi9HnW15dtWAEXo81sIPQ5HCLiALBT0i6gLSL2AkiaKOnBfD9J44HNkhqALRFxJJl1O/D4+dZXbBtWWr4YPdfFaDPrB0VE2mPot0wmE42NjWkPoyI80vga33j0Bf7p64v4YP370h6OmVUwSbsjItPTPF8EV2WaWlyMNrP+czhUGRejzWwgOByqiIvRZjZQHA5V5JXWt3n3VIfDwcz6zeFQRbLNvjLazAaGw6GKNLXkGDd6JFdd7GK0mfWPw6GKdN6m+yIXo82s3xwOVeL0mQ5+88Yxn1IyswHhcKgSv2s93lmMvmxC6c5mZiU4HKpE1s+MNrMB5HCoEk0tOS4cPZIrL/YtM8ys/xwOVSL/zOiRLkab2QBwOFSB02c6+M3rLkab2cBxOFSB37Uep/3UGdcbzGzAOByqgIvRZjbQHA5VIF+MvsrPbzCzAeJwqALZlhzXTXMx2swGTlnhIGmNpJ2S1pTo95CkHZLWJu8vl7Q9eb0m6XMF/Rok/bBg2YMFfeeXM85acKYjXIw2swHX53CQNB2YERELgCWSZhbpNw8gIhYCcyRNj4hXI2JxRCwGmoCtksYBj0bEIuCQpIXJKp7I942I3WXsW034XevbtJ86
42dGm9mAKufIYSmwMflrfjKwsEi/WcCeZPopYG5+hqSrgJaIaI+I4xHxi2TWm8CJZPrm5KjjAUndzpdIWiWpUVJja2trGbtRHfK36XYx2swGUslwkLRC0pP5FzAVOAmsBVYBU4osug9YlEwvAApv+nMHsLHLdmYBN0bEr5Km1clRRx1wS9eVR8T6iMhERKa+vr7UblStrIvRZjYISoZDRGyIiE/lX8AhYB1wDzAGOFxkuSywX9JWYBSQK5i9FNiefyPpEuBbwN0Fy29KJjcDs/uwTzWlycVoMxsE5ZxW2gY0R0QTsBx45jx9742IZUAAu+DsKaWDEXG6oN8DwN0RcSzpM0nSkmReBniljHFWvTMdwV4Xo81sEPQ5HCLiALBT0i6gLSL2AkiaKOnBfD9J44HNkhqALRFxJJl1O/B4Qb+PAouBR5JfJt0GHAPuTJa9FnisnJ2rdvuTYrTrDWY20OrKWSgi7gfu79J2FFhd8L4NWNbDst/u8v5XwLQeNrOynLHVkrNXRvuXSmY2wHwR3DCWbclxwaiRfNDFaDMbYA6HYaypJcd1vk23mQ0Ch8MwlS9Gu95gZoPB4TBM/fPht3nn5Bn/UsnMBoXDYZjybbrNbDA5HIapbPMxxo4awQfrx6U9FDOrQg6HYSp/ZXTdSP8TmtnA8zfLMHSmI2h6PedTSmY2aBwOw5CL0WY22BwOw5CvjDazweZwGIbyxeiZvjLazAaJw2EYamrJca2L0WY2iPztMsx0dAR7XYw2s0HmcBhm9h8+zvGTvk23mQ0uh8Mw0+RitJkNAYfDMJNtybkYbWaDzuEwzGRdjDazIVDWN4ykNZJ2SlpTot9DknZIWpu8vzx5FOh2Sa9J+lzSfrCgfX5ftlFLOjqC3/g23WY2BPocDpKmAzMiYgGwRNLMIv3mAUTEQmCOpOkR8WpELI6IxUATsDXp/kS+PSJ293Ybteafjxzn7ROnfWW0mQ26co4clgIbk7/wJwMLi/SbBexJpp8C5uZnSLoKaImI9qTp5uQI4wFJ6s02JK2S1CipsbW1tYzdGH6afJtuMxsiJcNB0gpJT+ZfwFTgJLAWWAVMKbLoPmBRMr0AmFAw7w5gY8H71ckRRh1wC52BcN5tRMT6iMhERKa+vr7UblSFbHOOMXUjmDXZxWgzG1x1pTpExAZgQ/69pJXAOmAlMA44XGS5rKT9krYCR4BcweylwH0FfTclk5uBG4DW3myj1rgYbWZDpZxvmW1Ac0Q0AcuBZ87T996IWAYEsAvOnlI6GBGnk/eTJC1J+meAV/q4jZrQ4WdGm9kQ6nM4RMQBYKekXUBbROwFkDRR0oP5fpLGA5slNQBbIuJIMut24PGCVR4D7kz6XQs8VmwbtexfkmK0w8HMhkLJ00o9iYj7gfu7tB0FVhe8bwOW9bDst7u8P03n6aOS26hl+dt0+5dKZjYUfPJ6mMg25xhdN4JZU1yMNrPB53AYJvLF6FEuRpvZEPA3zTDwXjF6QunOZmYDwOEwDLgYbWZDzeEwDLgYbWZDzeEwDDS1dBajr54yPu2hmFmNcDgMA9mWHNdOHe9itJkNGX/bVLiOjmBvyzGfUjKzIeVwqHAH3nyHthOnmevHgprZEHI4VDgXo80sDQ6HCuditJmlweFQ4bLNLkab2dDzN04FiwiaXs/5lJKZDTmHQwU7cOQd2t71ldFmNvQcDhXMxWgzS4vDoYI1teQYPdLFaDMbeg6HCpZtyfGhaeMZXed/JjMbWmV960haI2mnpDUl+j0kaYektcn7yyVtT16vSfpcT21J34MF7fPLGedwFhFkW1yMNrN09PkxoZKmAzMiYoGk/ytpU0S80kO/eQARsVDSI5KmR8SrwOJk/hZga0S0d21LVvFERPxpGftUFVyMNrM0lXPksBTYmPw1PxlYWKTfLGBPMv0UMDc/Q9JVQEsSDMXabk6OOh6QpK4rl7RKUqOkxtbW1jJ2o7Lli9EOBzNLQ8lwkLRC0pP5FzAV
OAmsBVYBU4osug9YlEwvAAofY3YHsLFL/65tqyNiIZ1HN7d0XXlErI+ITERk6uvrS+3GsONitJmlqWQ4RMSGiPhU/gUcAtYB9wBjgMNFlssC+yVtBUYBuYLZS4HtXRY5py0iNiWTm4HZvdmZapJtyXHNVBejzSwd5XzzbAOaI6IJWA48c56+90bEMiCAXXD29NHBiDid79S1TdIkSUuS2RmgW02jmkUETS5Gm1mK+hwOEXEA2ClpF9AWEXsBJE2U9GC+n6TxwGZJDcCWiDiSzLodeLzLaru2HQPuTJa9Fnisr+Mczl598x2OuRhtZinq86+VACLifuD+Lm1HgdUF79uAZT0s++1SbckRxMpyxlYNXIw2s7T5hHYFyrbkGDVSXD31fWkPxcxqlMOhAjUlxegxdSPTHoqZ1SiHQ4XpLEYf8yklM0uVw6HCvPZmO7n2U1x/6cS0h2JmNczhUGFcjDazSuBwqDAuRptZJXA4VBgXo82sEjgcKkj+Nt0+pWRmaXM4VJDmtzqL0b5thpmlzeFQQVyMNrNK4XCoIPli9DVTfZtuM0uXw6GCNLXkuHqKi9Fmlj6HQ4WICF5odjHazCqDw6FCuBhtZpXE4VAhXIw2s0ricKgQ2ZYcdSNcjDazyuBwqBD5YvTYUS5Gm1n6ygoHSWsk7ZS0pkS/hyTtkLS2oO2bkn4h6SeS6pK2OyTtknRfQb9ubdXKV0abWaXpczhImg7MiIgFwBJJM4v0mwcQEQuBOZKmSxoNLIiIm4GXgIykC4FbI+Im4H2SlvTUVt7uDQ/Nb7Vz9J1TzLnM4WBmlaGcZ0gvBTZKmg9MBhYCr/TQbxawJ5l+CpgbEZsljZZ0A3A18CLwMWCzpMuBDwGLAPXQ9vPClUtaBawCuPzyy8vYDfjtwWP82//967KWHUjvnDwDuBhtZpWjZDhIWgGsKGh6CngeWEvnl/Nniiy6D/gL4AfAAuBo0r4D+Bbwu6RtMpAD/gb4EvDNIm3niIj1wHqATCYTpfajJ2PrRjJrSmXcGvuT105mziUT0h6GmRnQi3CIiA3Ahvx7SSuBdcBKYBxwuMhyWUn7JW0FjgA5SR8BJkTEUkl/AXwBaAXuA74BnEjW11PbgLvi4nH83RfnD8aqzcyGtXIK0tuA5ohoApYDz5yn770RsQwIYBcwFTidzDsCTEqWfwt4Mlnf00XazMxsiPQ5HCLiALBT0i6gLSL2AkiaKOnBfD9J4+msGzQAWyLiCLAFuCJpuw14OCKOA98HngWuS/p2a+vHPpqZWR8poqzT9RUlk8lEY2Nj2sMwMxtWJO2OiExP83wRnJmZdeNwMDOzbhwOZmbWjcPBzMy6cTiYmVk3VfFrJUmtwIF+rOJiBulCu2HIn8W5/Hm8x5/Fuarh85gREfU9zaiKcOgvSY3Ffs5Va/xZnMufx3v8WZyr2j8Pn1YyM7NuHA5mZtaNw6HT+rQHUEH8WZzLn8d7/Fmcq6o/D9cczMysGx85mJlZNw4HMzPrpubDQdIaSTslrUl7LGmT9JCkBkk/THsslULShyX5lvGApLuS/x+bJF2Q9njSJOmbkn4h6SeSynnccsWr6XCQNJ3Oi0AWAEskzUx7TGmRNA54NCIWAYckLUx7TGmTNAL4MjAq7bGkTdJY4FbgE8DyiGhPeUipkTQaWBARNwMvAVV5rUNNhwOwFNgoaT6dz62u2S/EiDgeEb9I3r5J5+NZa91XgP+Z9iAqxE1AO7CVzufH16yIOAmMlnQDcDXwYrojGhy1Hg6TgZN0/mdfBUxJdzjpkzQLuDEifpX2WNIkaSpwaUT8Ou2xVIhpdD4zfhlwZfLFWMt2AN8Cfg8cTXcog6PWw6EVWAfcA4xh+N8npV8kXULnf/i70x5LBfhj4NOStgPzJX015fGk7TjQEBEdQANwTcrjSY2kjwATImIpsB/4QspDGhS1Hg7bgOaIaAKWA8+kPJ60PQDcHRHH0h5I2iJiXUR8LCIWA7sj4rtpjyll
u+k8DQswG3g5xbGkbSpwOpk+AkxKcSyDpqbDISIOADsl7QLaImJv2mNKi6SPAouBRyRtl3RbykOyChIRrwPPSnoaGBURz6U9phRtAa6Q1ADcBjyc8ngGha+QNjOzbmr6yMHMzHrmcDAzs24cDmZm1o3DwczMunE4mJlZNw4HMzPrxuFgZmbd/H8njNt/3qLGsgAAAABJRU5ErkJggg==\n",
+ "text/plain": [
+ "
We would like to thank the support from “Project for advancement of software usability in materials science” by The Institute for Solid State Physics, The University of Tokyo, for development of PHYSBO.
Bayesian optimization is a method that can be used in complex simulations or real-world experimental tasks where the evaluation of the objective function (e.g., property values) is very costly. In other words, Bayesian optimization solves the problem of finding explanatory variables (material composition, structure, process and simulation parameters, etc.) that have a better objective function (material properties, etc.) with as few experiments and simulations as possible. In Bayesian optimization, we start from a situation where we have a list of candidates for the explanatory variables to be searched (represented by the vector \({\bf x}\)). Then, from among the candidates, the one that is expected to improve the objective function \(y\) is selected by making good use of prediction by machine learning (using Gaussian process regression). We then evaluate the value of the objective function by performing experiments and simulations on the candidates. By repeating the process of selection by machine learning and evaluation by experimental simulation, optimization can be achieved in as few times as possible.
+
The details of the Bayesian optimization algorithm are described below.
+
+
Step1: Initialization
+
+
Prepare the space to be explored in advance. In other words, list up the composition, structure, process, simulation parameters, etc. of the candidate materials as a vector \({\bf x}\). At this stage, the value of the objective function is not known. A few candidates are chosen as initial conditions and the value of the objective function \(y\) is estimated by experiment or simulation. This gives us the training data \(D = \{ {\bf x}_i, y_i \}_{(i=1, \cdots, N)}\) with the explanatory variables \({\bf x}\) and the objective function \(y\).
+
+
Step2: Selection of candidates
+
+
Using the training data, learn a Gaussian process. For a Gaussian process, the mean \(\mu_c ({\bf x})\) and the variance \(\sigma_c ({\bf x})\) of the predictions at an arbitrary \({\bf x}\) are given as follows
where \(k({\bf x}, {\bf x}')\) is a function called as a kernel, and it represents the similarity of two vectors. In general, the following Gaussian kernel is used:
For all candidates that have not yet been tested or simulated, the prediction \(\mu_c ({\bf x})\) and the variance associated with the uncertainty of the prediction \(\sigma_c ({\bf x})\) are estimated. Using this, the acquisition function is calculated. Then, the candidate \({\bf x}^*\) is selected that maximizes the acquisition function from among the candidates for which we do not yet know the value of the objective function. In this case, \(\sigma\) and \(\eta\) are called hyperparameters, and PHYSBO will automatically set the best value.
+
As an acquisition function, for example, Maximum Probability of Improvement (PI) and Maximum Expected Improvement (EI) are useful.
+The score of PI is defined as follows.
where \(\Phi(\cdot)\) is the cumulative distribution function.
+The PI score represents the probability of exceeding the maximum \(y_{\max}\) of the currently obtained \(y\).
+In addition, the EI score is the expected value of the difference between the predicted value and the current maximum \(y_{\max}\) and is given by
where \(\phi(\cdot)\) is a probability density function.
+
+
Step3: Experiment (Simulation)
+
+
Perform an experiment or simulation on the candidate \({\bf x}^*\) with the largest acquisition function selected in step 2, and estimate the objective function value \(y\). This will add one more piece of training data. Repeat steps 2 and 3 to search for candidates with good scores.
In PHYSBO, random feature map, Thompson sampling, and Cholesky decomposition are used to accelerate the calculation of Bayesian optimization.
+First, the random feature map is introduced.
+By introducing the random feature map \(\phi (\mathbf{x})\), we can approximate the Gaussian kernel \(k(\mathbf{x},\mathbf{x}')\) as follows.
where \(z_{\omega, b} (\mathbf{x}) = \sqrt{2} \cos (\boldsymbol{\omega}^\top \mathbf{x}+b)\).
+Then, \(\boldsymbol{\omega}\) is generated from \(p(\boldsymbol{\omega}) = (2\pi)^{-d/2} \exp (-\|\boldsymbol{\omega}\|^2/2)\) and \(b\) is chosen uniformly from \([0, 2 \pi]\).
+This approximation is strictly valid in the limit of \(l \to \infty\), where the value of \(l\) is the dimension of the random feature map.
+
+
\(\Phi\) can be represented as an \(l \times n\) matrix whose \(i\)-th column is \(\phi(\mathbf{x}_i)\) for each training-data vector \(\mathbf{x}_i\), as follows:
Next, a method that uses Thompson sampling to make the computation time for candidate prediction \(O(l)\) is introduced.
+Note that using EI or PI will result in \(O(l^2)\) because of the need to evaluate the variance.
+In order to perform Thompson sampling, the Bayesian linear model defined below is used.
+
+\[y = \mathbf{w}^\top \phi (\mathbf{x}),\]
+
where \(\phi(\mathbf{x})\) is random feature map described above and \(\mathbf{w}\) is a coefficient vector.
+In a Gaussian process, when the training data \(D\) is given, this \(\mathbf{w}\) is determined to follow the following Gaussian distribution.
In Thompson sampling, one coefficient vector is sampled according to this posterior probability distribution and set to \(\mathbf{w}^*\), which represents the acquisition function as follows
The \(\mathbf{x}^*\) that maximizes \(\text{TS} (\mathbf{x})\) will be selected as the next candidate.
+In this case, \(\phi (\mathbf{x})\) is an \(l\) dimensional vector, so the acquisition function can be computed with \(O(l)\).
+
Next, the manner for accelerating the sampling of \(\mathbf{w}\) is introduced.
+The matrix \(A\) is defined as follows.
+
+\[A = \frac{1}{\sigma^2} \Phi \Phi^\top +I\]
+
Then the posterior probability distribution is given as
Therefore, in order to sample \(\mathbf{w}\), we need to calculate \(A^{-1}\).
+Now consider the case of the newly added \((\mathbf{x}', y')\) in the Bayesian optimization iteration.
+With the addition of this data, the matrix \(A\) is updated as
+
+\[A' = A + \frac{1}{\sigma^2} \phi (\mathbf{x}') \phi (\mathbf{x}')^\top.\]
+
This update can be done using the Cholesky decomposition ( \(A= L^\top L\) ), which reduces the time it takes to compute \(A^{-1}\) to \(O(l^2)\).
+If we compute \(A^{-1}\) at every step, the numerical cost becomes \(O(l^3)\).
+The \(\mathbf{w}\) is obtained by
A. Rahimi and B. Recht, “Random features for large-scale kernel machines,”
+in “Advances in neural information processing systems,” 2007, pp. 1177-1184.
compute the covariance matrix
+:param X: N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
+:type X: numpy.ndarray
+:param Z: N x d dimensional matrix. Each row of Z denotes the d-dimensional feature vector of search candidate.
+:type Z: numpy.ndarray
+:param params: Parameters
+:type params: numpy.ndarray
+:param diag: If X is the diagonalization matrix, true.
+:type diag: bool
max_num_probes (int) – Maximum number of searching process by Bayesian optimization.
+
num_search_each_probe (int) – Number of searching by Bayesian optimization at each process.
+
predictor (predictor object) – Base class is defined in physbo.predictor.
+If None, blm_predictor is defined.
+
is_disp (bool) – If true, process messages are outputted.
+
simulator (callable) – Callable (function or object with __call__)
+Here, action is an integer which represents the index of the candidate.
+
score (str) – The type of acquisition function.
+TS (Thompson Sampling), EI (Expected Improvement) and PI (Probability of Improvement) are available.
+
interval (int) – The interval number of learning the hyper parameter.
+If you set the negative value to interval, the hyper parameter learning is not performed.
+If you set zero to interval, the hyper parameter learning is performed only at the first step.
+
num_rand_basis (int) – The number of basis function. If you choose 0, ordinary Gaussian process run.
mode (str) – The type of acquisition function. TS, EI and PI are available.
+These functions are defined in score.py.
+
actions (array of int) – actions to calculate score
+
xs (physbo.variable or np.ndarray) – input parameters to calculate score
+
predictor (predictor object) – predictor used to calculate score.
+If not given, self.predictor will be used.
+
training (physbo.variable) – Training dataset.
+If not given, self.training will be used.
+
parallel (bool) – Calculate scores in parallel by MPI (default: True)
+
alpha (float) – Tuning parameter which is used if mode = TS.
+In TS, multi variation is tuned as np.random.multivariate_normal(mean, cov*alpha**2, size).
+
+
+
Returns:
+
f – Score defined in each mode.
+
+
Return type:
+
float or list of float
+
+
Raises:
+
RuntimeError – If both actions and xs are given
+
+
+
Notes
+
When neither actions nor xs are given, scores for actions not yet searched will be calculated.
+
When parallel is True, it is assumed that the function receives the same input (actions or xs) for all the ranks.
+If you want to split the input array itself, set parallel be False and merge results by yourself.
max_num_probes (int) – Maximum number of random search process.
+
num_search_each_probe (int) – Number of search at each random search process.
+
simulator (callable) – Callable (function or object with __call__) from action to t
+Here, action is an integer which represents the index of the candidate.
+
is_disp (bool) – If true, process messages are outputted.
Writing history (update history, not output to a file).
+
+
Parameters:
+
+
action (numpy.ndarray) – Indexes of actions.
+
t (numpy.ndarray) – N dimensional array. The negative energy of each search candidate (value of the objective function to be optimized).
+
X (numpy.ndarray) – N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of each search candidate.
+
time_total (numpy.ndarray) – N dimensional array. The total elapsed time in each step.
+If None (default), filled by 0.0.
+
+time_update_predictor (numpy.ndarray) – N dimensional array. The elapsed time for updating predictor (e.g., learning hyperparameters) in each step.
+If None (default), filled by 0.0.
+
+time_get_action (numpy.ndarray) – N dimensional array. The elapsed time for getting next action in each step.
+If None (default), filled by 0.0.
+
+time_run_simulator (numpy.ndarray) – N dimensional array. The elapsed time for running the simulator in each step.
+If None (default), filled by 0.0.
t (numpy.ndarray) – N dimensional array. The negative energy of each search candidate (value of the objective function to be optimized).
+
action (numpy.ndarray) – N dimensional array. The indexes of actions of each search candidate.
+
time_total (numpy.ndarray) – N dimensional array. The total elapsed time in each step.
+If None (default), filled by 0.0.
+
+time_update_predictor (numpy.ndarray) – N dimensional array. The elapsed time for updating predictor (e.g., learning hyperparameters) in each step.
+If None (default), filled by 0.0.
+
+time_get_action (numpy.ndarray) – N dimensional array. The elapsed time for getting next action in each step.
+If None (default), filled by 0.0.
+
+time_run_simulator (numpy.ndarray) – N dimensional array. The elapsed time for running the simulator in each step.
+If None (default), filled by 0.0.
max_num_probes (int) – Maximum number of searching process by Bayesian optimization.
+
num_search_each_probe (int) – Number of searching by Bayesian optimization at each process.
+
predictor (predictor object) – Base class is defined in physbo.predictor.
+If None, blm_predictor is defined.
+
is_disp (bool) – If true, process messages are outputted.
+
simulator (callable) – Callable (function or object with __call__)
+Here, action is an integer which represents the index of the candidate.
+
score (str) – The type of acquisition function.
+TS (Thompson Sampling), EI (Expected Improvement) and PI (Probability of Improvement) are available.
+
interval (int) – The interval number of learning the hyper parameter.
+If you set the negative value to interval, the hyper parameter learning is not performed.
+If you set zero to interval, the hyper parameter learning is performed only at the first step.
+
num_rand_basis (int) – The number of basis function. If you choose 0, ordinary Gaussian process run.
mode (str) – The type of acquisition function. TS, EI and PI are available.
+These functions are defined in score.py.
+
actions (array of int) – actions to calculate score
+
xs (physbo.variable or np.ndarray) – input parameters to calculate score
+
predictor (predictor object) – predictor used to calculate score.
+If not given, self.predictor will be used.
+
training (physbo.variable) – Training dataset.
+If not given, self.training will be used.
+
parallel (bool) – Calculate scores in parallel by MPI (default: True)
+
alpha (float) – Tuning parameter which is used if mode = TS.
+In TS, multi variation is tuned as np.random.multivariate_normal(mean, cov*alpha**2, size).
+
+
+
Returns:
+
f – Score defined in each mode.
+
+
Return type:
+
float or list of float
+
+
Raises:
+
RuntimeError – If both actions and xs are given
+
+
+
Notes
+
When neither actions nor xs are given, scores for actions not yet searched will be calculated.
+
When parallel is True, it is assumed that the function receives the same input (actions or xs) for all the ranks.
+If you want to split the input array itself, set parallel be False and merge results by yourself.
max_num_probes (int) – Maximum number of random search process.
+
num_search_each_probe (int) – Number of search at each random search process.
+
simulator (callable) – Callable (function or object with __call__) from action to t
+Here, action is an integer which represents the index of the candidate.
+
is_disp (bool) – If true, process messages are outputted.
Writing history (update history, not output to a file).
+
+
Parameters:
+
+
action (numpy.ndarray) – Indexes of actions.
+
t (numpy.ndarray) – N dimensional array. The negative energy of each search candidate (value of the objective function to be optimized).
+
X (numpy.ndarray) – N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of each search candidate.
+
time_total (numpy.ndarray) – N dimensional array. The total elapsed time in each step.
+If None (default), filled by 0.0.
+
+time_update_predictor (numpy.ndarray) – N dimensional array. The elapsed time for updating predictor (e.g., learning hyperparameters) in each step.
+If None (default), filled by 0.0.
+
+time_get_action (numpy.ndarray) – N dimensional array. The elapsed time for getting next action in each step.
+If None (default), filled by 0.0.
+
+time_run_simulator (numpy.ndarray) – N dimensional array. The elapsed time for running the simulator in each step.
+If None (default), filled by 0.0.
Calculate scores (acquisition function) for test data.
+
+
Parameters:
+
+
mode (str) –
Kind of score.
+
”EI”, “PI”, and “TS” are available.
+
+
predictor (predictor object) – Base class is defined in physbo.predictor.
+
training (physbo.variable) – Training dataset.
+If the predictor is not trained, use this for training.
+
test (physbo.variable) – Inputs
+
fmax (float) – Max value of mean of posterior probability distribution.
+If not set, the maximum value of posterior mean for training is used.
+Used only for mode == “EI” and “PI”
+
alpha (float) – noise for sampling source (default: 1.0)
+Used only for mode == “TS”
Reference: (Couckuyt et al., 2014) Fast calculation of multiobjective probability of improvement and expected improvement criteria for Pareto optimization
Calculate Hypervolume-based Probability of Improvement (HVPI).
+
Reference: (Couckuyt et al., 2014) Fast calculation of multiobjective probability of improvement and expected improvement criteria for Pareto optimization
Bayesian optimization is well suited for optimization problems such as complex simulations or real-world experimental tasks where the objective function is very costly to evaluate.
+In PHYSBO, the following steps are used to perform the optimization (please refer to the tutorial and API reference for details on each).
+
+
Defining the search space
+
+
+
Define each parameter set (d-dimensional vector) as a search candidate, where N: the number of search candidates , d: the number of input parameter dimensions. The parameter set should list all the candidates.
+
+
+
Defining the simulator
+
+
+
For searching candidates defined above, define a simulator that gives the objective function values (values to be optimized, such as material property values) for each search candidate. In PHYSBO, the direction of optimization is to maximize the objective function, so if you want to minimize the objective function, you can do so by applying a negative value to the value returned by the simulator.
+
+
+
Performing optimization
+
+
+
First, set the optimization policy (the search space is passed to policy as an argument at this stage). You can choose between the following two optimization methods.
+
+
+
random_search
+
bayes_search
+
+
In random_search, we randomly select parameters from the search space and search for the largest objective function among them. It is used to prepare an initial set of parameters as a preprocessing step for Bayesian optimization. bayes_search performs Bayesian optimization. The type of score (acquisition function) in Bayesian optimization can be one of the following.
+
+
+TS (Thompson Sampling): Sample one regression function from the posterior probability distribution of the learned Gaussian process, and select the point where the predicted value becomes maximum as a next candidate.
+
EI (Expected Improvement): Select the point where the expected value of the difference between the predicted value by the Gaussian process and the maximum value in the current situation becomes the maximum as a next candidate.
+
PI (Probability of Improvement): Select the point with the highest probability of exceeding the current maximum of the current acquisition function as a next candidate.
+
+
Details of Gaussian processes are described in Algorithm . For other details of each method, please see this reference .
+If you specify the simulator and the number of search steps in these methods, the following loop will be repeated for the specified number of search steps.
+
+
i). Select the next parameter to be executed from the list of candidate parameters.
+
ii). Run the simulator with the selected parameters.
+
+
The number of parameter returned in i) is one by default, but it is possible to return multiple parameters in one step. For more details, please refer to the “Exploring multiple candidates at once” section of the tutorial. Also, instead of running the above loop inside PHYSBO, it is possible to control i) and ii) separately from the outside. In other words, it is possible to propose the next parameter to be executed from PHYSBO, evaluate its objective function value in some way outside PHYSBO (e.g., by experiment rather than numerical calculation), and register the evaluated value in PHYSBO. For more details, please refer to the “Running Interactively” section of the tutorial.
+
+
+
+
Check numerical results
+
+
+
The search result res is returned as an object of the history class ( physbo.search.discrete.results.history ). The following is a reference to the search results.
+
+
res.fx: The logs of evaluation values for simulator (objective function) simulator.
+
res.chosen_actions: The logs of the action ID (parameter) when the simulator has executed.
+
fbest,best_action=res.export_all_sequence_best_fx(): The logs of the best values and their action IDs (parameters) at each step where the simulator has executed.
+
res.total_num_search: Total number of steps where the simulator has executed.
+
+
The search results can be saved to an external file using the save method, and the output results can be loaded using the load method. See the tutorial for details on how to use it.
PHYSBO (optimization tools for PHYSics based on Bayesian Optimization) is a Python library for fast and scalable Bayesian optimization. It is based on COMBO (Common Bayesian Optimization) and has been developed mainly for researchers in the materials science field. There are many attempts to accelerate scientific discovery through data-driven design-of-experiment algorithms in the fields of physics, chemistry, and materials. Bayesian optimization is an effective tool for accelerating these scientific discoveries. Bayesian optimization is a technique that can be used for complex simulations and real-world experimental tasks where the evaluation of objective function values (e.g., characteristic values) is very costly. In other words, the problem solved by Bayesian optimization is to find a parameter (e.g., material composition, structure, process and simulation parameters) with a better objective function value (e.g., material properties) in as few experiments and simulations as possible. In Bayesian optimization, the candidate parameters to be searched for are listed in advance, and the candidate with the largest objective function value is selected from among the candidates by making good use of machine learning (using Gaussian process regression) prediction. Experiments and simulations are performed on the candidates and the objective function values are evaluated. By repeating the process of selection by machine learning and evaluation by experimental simulation, we can reduce the number of times of optimization. On the other hand, Bayesian optimization is generally computationally expensive, and standard implementations such as scikit-learn are difficult to handle a large amount of data. PHYSBO achieves high scalability due to the following features
Copyright (c) <2020-> The University of Tokyo. All rights reserved.
+
Part of this software is developed under the support of “Project for advancement of software usability in materials science” by The Institute for Solid State Physics, The University of Tokyo.
In this tutorial, the problem of finding a stable interface structure for Cu is used as an example. The values that have already been evaluated are used, although the evaluation of the objective function, i.e., the structural relaxation calculation, actually takes on the order of several hours per calculation. For more information on the problem setup, please refer to the following references
+
+
+
Kiyohara, H. Oda, K. Tsuda and T. Mizoguchi, “Acceleration of stable interface structure searching using a kriging approach”, Jpn. J. Appl. Phys. 55, 045502 (2016).
+
+
+
+
Save the dataset file s5-210.csv into the subdirectory data, and load dataset from this file as the following:
Prepare a model similar to the one used for training as gp
+
+
[14]:
+
+
+
#Definition of covariance (Gaussian)
+cov=physbo.gp.cov.gauss(X_train.shape[1],ard=False)
+
+#Definition of mean value
+mean=physbo.gp.mean.const()
+
+#Definition of likelihood function (Gaussian)
+lik=physbo.gp.lik.gauss()
+
+#Generation of a Gaussian Process Model
+gp=physbo.gp.model(lik=lik,mean=mean,cov=cov)
+
+
+
+
Prepare a model similar to the one used for training as gp
+
+
[15]:
+
+
+
#Input learned parameters into the Gaussian process.
+gp.set_params(gp_params)
+
+
+#Calculate the mean (predicted value) and variance of the test data
+gp.prepare(X_train,t_train)
+fmean=gp.get_post_fmean(X_train,X_test)
+fcov=gp.get_post_fcov(X_train,X_test)
+
In the following example, the search space X is defined as a grid chopped by window_num=10001 divisions from x_min=-2.0 to x_max=2.0. Note that X must be in window_num x d ndarray format (d is the number of dimensions, in this case one). Since d is one in this example, we use reshape to transform X into this format.
Here, we define the simulator class to set as the objective function.
+
In this case, the problem is to find the minimum \(x\) such that \(f(x) = 3 x^4 + 4 x ^3 + 1.0\) (the answer is \(x=-1.0\)).
+
In the simulator class, we define the __call__ function (or __init__ if there are initial variables, etc.). (If there are initial variables, define __init__.) The action indicates the index number of the grid to be retrieved from the search space, and is generally in the form of an ndarray so that multiple candidates can be calculated at once. In this case, we choose one candidate point from X as action_idx=action[0] to calculate only one candidate at a time. Since PHYSBO
+is designed to find the maximum value of the objective function, it returns the value of f(x) at the candidate point multiplied by -1.
+
+
[3]:
+
+
+
# Declare the class for calling the simulator.
+class simulator:
+
+    def __call__(self,action):
+ action_idx=action[0]
+ x=X[action_idx][0]
+ fx=3.0*x**4+4.0*x**3+1.0
+ fx_list.append(fx)
+ x_list.append(X[action_idx][0])
+
+ print("*********************")
+ print("Present optimum interactions")
+
+ print("x_opt=",x_list[np.argmin(np.array(fx_list))])
+
+        return -fx
+
Next, set test_X to the matrix of search candidates (numpy.array).
+
+
[4]:
+
+
+
# set policy
+policy=physbo.search.discrete.policy(test_X=X)
+
+# set seed
+policy.set_seed(0)
+
+
+
+
When policy is set, no optimization is done yet. Execute the following methods on policy to optimize it.
+
+
random_search.
+
bayes_search.
+
+
If you specify the simulator and the number of search steps in these methods, the following loop will be executed for the number of search steps.
+
+
Select the next parameter to be executed from the candidate parameters.
+
Execute simulator with the selected parameters.
+
+
The default number of parameters returned by i) is one, but it is possible to return multiple parameters in one step. See the section “Searching for multiple candidates at once” for details.
+
Also, instead of running the above loop inside PHYSBO, it is possible to control i) and ii) separately from the outside. In other words, it is possible to propose the next parameter to be executed from PHYSBO, evaluate its objective function value in some way outside PHYSBO (e.g., by experiment rather than numerical calculation), propose it in some way outside PHYSBO, and register the evaluated value in PHYSBO. For more details, please refer to the “Running Interactively” section of the
+tutorial.
Since Bayesian optimization requires at least two objective function values to be obtained (the initial number of data required depends on the problem to be optimized and the dimension d of the parameters), we will first perform a random search.
+
argument.
+
+
max_num_probes: Number of search steps.
+
simulator: The simulator of the objective function (an object of class simulator).
When executed, the objective function value and its action ID for each step, and the best value up to now and its action ID will be printed as follows.
+
0020-th step: f(x) = -19.075990 (action=8288)
+ current best f(x) = -0.150313 (best action=2949)
+
Next, we run the Bayesian optimization as follows.
+
argument.
+
+
max_num_probes: Number of search steps.
+
simulator: The simulator of the objective function (an object of class simulator).
+
score: The type of acquisition function. You can specify one of the following
+
+
TS (Thompson Sampling)
+
EI (Expected Improvement)
+
PI (Probability of Improvement)
+
+
+
interval:
+The hyperparameters are trained at the specified interval.
+If a negative value is specified, no hyperparameter will be learned.
+0 means that hyperparameter learning will be performed only in the first step.
+
num_rand_basis: Number of basis functions. 0 means that a normal Gaussian process without Bayesian linear model will be used.
The search result res is returned as an object of the history class (physbo.search.discrete.results.history).
+
The following is a reference to the search results.
+
+
+
res.fx : The history of evaluated values of simulator (objective function).
+
res.chosen_actions: The history of action IDs (parameters) when the simulator was evaluated.
+
fbest,best_action=res.export_all_sequence_best_fx(): The history of best values and their action IDs (parameters) for all timings when the simulator was evaluated.
+
res.total_num_search: Total number of simulator evaluations.
+
+
+
Let’s plot the objective function value and the best value at each step.
+
res.fx and best_fx should range up to res.total_num_search, respectively.
PHYSBO can calculate acquisition functions for candidates in parallel by using MPI via mpi4py . To enable MPI parallelization, pass a MPI communicator such as MPI.COMM_WORLD to a keyword argument, comm of the constructor of the policy.
+
+
+
+
\ No newline at end of file
diff --git a/manual/v2.0.2/en/notebook/tutorial_basic.ipynb b/manual/v2.0.2/en/notebook/tutorial_basic.ipynb
new file mode 100644
index 00000000..817c2a53
--- /dev/null
+++ b/manual/v2.0.2/en/notebook/tutorial_basic.ipynb
@@ -0,0 +1,590 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Basic usage of PHYSBO\n",
+ "\n",
+ "## Introduction\n",
+ "\n",
+ "In this tutorial, we will introduce how to define the simulator class and find the minimum value of a one-dimensional function using PHYSBO.\n",
+ "\n",
+ "First, we will import PHYSBO."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-03-05T04:50:30.091028Z",
+ "start_time": "2021-03-05T04:50:29.600019Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "import physbo"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Defining the search space\n",
+ "\n",
+ "In the following example, the search space ``X`` is defined as a grid chopped by ``window_num=10001`` divisions from ``x_min = -2.0`` to ``x_max = 2.0``.\n",
+ "Note that ``X`` must be in ``window_num`` x ``d`` ndarray format (``d`` is the number of dimensions, in this case one). In this case, ``d`` is the number of dimensions, in this case two, so we use reshape to transform it."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-03-05T04:50:30.097211Z",
+ "start_time": "2021-03-05T04:50:30.092637Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "#In\n",
+ "import numpy as np\n",
+ "import scipy\n",
+ "import physbo\n",
+ "import itertools\n",
+ "\n",
+ "#In\n",
+ "#Create candidate\n",
+ "window_num=10001\n",
+ "x_max = 2.0\n",
+ "x_min = -2.0\n",
+ "\n",
+ "X = np.linspace(x_min,x_max,window_num).reshape(window_num, 1)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Defining the simulator class\n",
+ "\n",
+ "Here, we define the simulator class to set as the objective function.\n",
+ "\n",
+ "In this case, the problem is to find the minimum $x$ such that $f(x) = 3 x^4 + 4 x ^3 + 1.0$ (the answer is $x=-1.0$).\n",
+ "\n",
+ "In the simulator class, we define the ``__call__`` function (or ``__init__`` if there are initial variables, etc.).\n",
+ "(If there are initial variables, define ``__init__``.) The action indicates the index number of the grid to be retrieved from the search space, and is generally in the form of an ndarray so that multiple candidates can be calculated at once.\n",
+ "In this case, we choose one candidate point from ``X`` as ``action_idx=action[0]`` to calculate only one candidate at a time.\n",
+ "Since **PHYSBO is designed to find the maximum value of the objective function**, it returns the value of f(x) at the candidate point multiplied by -1."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-03-05T04:50:30.104491Z",
+ "start_time": "2021-03-05T04:50:30.099622Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "# Declare the class for calling the simulator.\n",
+ "class simulator:\n",
+ "\n",
+ " def __call__(self, action ):\n",
+ " action_idx = action[0]\n",
+ " x = X[action_idx][0]\n",
+ " fx = 3.0*x**4 + 4.0*x**3 + 1.0\n",
+ " fx_list.append(fx)\n",
+ " x_list.append(X[action_idx][0])\n",
+ "\n",
+ " print (\"*********************\")\n",
+ " print (\"Present optimum interactions\")\n",
+ "\n",
+ " print (\"x_opt=\", x_list[np.argmin(np.array(fx_list))])\n",
+ "\n",
+ " return -fx"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Performing optimization"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Setting policy\n",
+ "\n",
+ "First, set the optimization `policy`. \n",
+ "\n",
+ "Next, set `test_X` to the matrix of search candidates (`numpy.array`)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-03-05T04:50:30.117205Z",
+ "start_time": "2021-03-05T04:50:30.108470Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "# set policy \n",
+ "policy = physbo.search.discrete.policy(test_X=X)\n",
+ "\n",
+ "# set seed\n",
+ "policy.set_seed(0)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "When `policy` is set, no optimization is done yet.\n",
+ "Execute the following methods on `policy` to optimize it.\n",
+ "\n",
+ "- `random_search`. \n",
+ "- `bayes_search`.\n",
+ "\n",
+ "If you specify the `simulator` and the number of search steps in these methods, the following loop will be executed for the number of search steps.\n",
+ "\n",
+ "i) Select the next parameter to be executed from the candidate parameters.\n",
+ "\n",
+ "ii) Execute `simulator` with the selected parameters.\n",
+ "\n",
+ "The default number of parameter returned by i) is one, but it is possible to return multiple parameters in one step.\n",
+ "See the section \"Searching for multiple candidates at once\" for details. \n",
+ "\n",
+ "Also, instead of running the above loop inside PHYSBO, it is possible to control i) and ii) separately from the outside. In other words, it is possible to propose the next parameter to be executed from PHYSBO, evaluate its objective function value in some way outside PHYSBO (e.g., by experiment rather than numerical calculation), propose it in some way outside PHYSBO, and register the evaluated value in PHYSBO. For more details, please refer to the \"Running Interactively\" section of the tutorial.\n",
+ "\n",
+ "### Random Search\n",
+ "\n",
+ "First of all, let's perform a random search.\n",
+ "\n",
+ "Since Bayesian optimization requires at least two objective function values to be obtained (the initial number of data required depends on the problem to be optimized and the dimension d of the parameters), we will first perform a random search. \n",
+ "\n",
+ "**argument**. \n",
+ "\n",
+ "- `max_num_probes`: Number of search steps. \n",
+ "- `simulator`: The simulator of the objective function (an object of class simulator). "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-03-05T04:50:30.149378Z",
+ "start_time": "2021-03-05T04:50:30.120660Z"
+ },
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "fx_list=[]\n",
+ "x_list = []\n",
+ "\n",
+ "res = policy.random_search(max_num_probes=20, simulator=simulator())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "When executed, the objective function value and its action ID for each step, and the best value up to now and its action ID will be printed as follows.\n",
+ "\n",
+ "````\n",
+ "0020-th step: f(x) = -19.075990 (action=8288)\n",
+ " current best f(x) = -0.150313 (best action=2949) \n",
+ "````\n",
+ "\n",
+ "\n",
+ "### Bayesian Optimization\n",
+ "\n",
+ "Next, we run the Bayesian optimization as follows.\n",
+ "\n",
+ "**argument**. \n",
+ "\n",
+ "- `max_num_probes`: Number of search steps. \n",
+ "- `simulator`: The simulator of the objective function (an object of class simulator). \n",
+ "- `score`: The type of acquisition function. You can specify one of the following\n",
+ " - TS (Thompson Sampling) \n",
+ " - EI (Expected Improvement) \n",
+ " - PI (Probability of Improvement) \n",
+ "- `interval`: \n",
+ "The hyperparameters are trained at the specified interval. \n",
+ "If a negative value is specified, no hyperparameter will be learned. \n",
+ "0 means that hyperparameter learning will be performed only in the first step. \n",
+ "- `num_rand_basis`: Number of basis functions. 0 means that a normal Gaussian process without Bayesian linear model will be used. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-03-05T04:50:32.482575Z",
+ "start_time": "2021-03-05T04:50:30.151250Z"
+ },
+ "code_folding": [],
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "res = policy.bayes_search(max_num_probes=50, simulator=simulator(), score='TS', \n",
+ " interval=0, num_rand_basis=500)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Checking the results\n",
+ "\n",
+ "The search result ``res`` is returned as an object of the ``history`` class (`physbo.search.discrete.results.history`). \n",
+ "The following is a reference to the search results.\n",
+ "\n",
+ "- `res.fx` : The history of evaluated values of simulator (objective function).\n",
+ "- `res.chosen_actions`: The history of action IDs (parameters) when the simulator was evaluated. \n",
+ "- `fbest, best_action= res.export_all_sequence_best_fx()`: The history of best values and their action IDs (parameters) for all timings when the simulator was evaluated.\n",
+ "- `res.total_num_search`: Total number of simulator evaluations.\n",
+ "\n",
+ "Let's plot the objective function value and the best value at each step. \n",
+ "`res.fx` and `best_fx` should range up to `res.total_num_search`, respectively."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-03-05T04:50:32.747272Z",
+ "start_time": "2021-03-05T04:50:32.484125Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "%matplotlib inline"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-03-05T04:50:32.897286Z",
+ "start_time": "2021-03-05T04:50:32.754938Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[]"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXMAAAD3CAYAAADv7LToAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8QVMy6AAAACXBIWXMAAAsTAAALEwEAmpwYAAAp40lEQVR4nO3deXDkZ33n8fe3b90zkua0RzPjA7AxGOzBB2BsbHMkBJJyKnFBWEgKMJvKVkLNVrZqQ4VcewaTbLHxkji3QxYvSbFZAwXB9mLHxh6D7QDxwYJtPJ6x55BGmtHV9+/ZP379a0mtbvWhnpl+NJ9XlcpWS9N6pNF89NX3ucw5h4iI+C12tgcgIiLrpzAXEdkAFOYiIhuAwlxEZANQmIuIbACJs/FBx8fH3Z49e87GhxYR8dYTTzwx5ZzbUu9tZyXM9+zZw+OPP342PrSIiLfM7GCjt6nNIiKyASjMRUQ2AIW5iMgGoDAXEdkAFOYiIhuAwlxEZAPoapib2X4zO2Bm+7v5vCIisraurTM3s13AbufcNWb2FTO7xzn3XLeeX86MIHBMzec5cipXecmyZSjNe163AzOr+2emFwoAjA6kWvoYU/N5DrxwgkPTWXaMZNgxkmHnpj62j2RIxlurL5xzvDC1wGMvTJOIG5fuGOairYNkkvEVn8vR2RyHphc5mS0ynyuxUCgxlyuRL5ZXPWc6GSediNGXipNJxBlIJxjpS7KpP8lIX5J0Isb0QoHJuTyT83km5/KkEzFG+lNs7k+yqS9FOhkjVyyTKwZki2WyhXLl9TLZyuOBcyTjRjIeIxGPkYgZpXJAsewolgNKgaMcOJyDwDmcc8RixmA6wVAmwWA6yUA6Tr4UVD+n+XyJYik8ztoMDIjFDDOImxEzIxYz+lNxhjNJhvsSDGeSpBIxTmWLnFwscnKxwKlskVLgCAJHUPn4/ak452/u5/zNfewa7Wdzf5JT2SKHZ7K8fDLLKyezxMzYNpxm63CGrUNphtJJJufzHJvNcWw2x/G5PIFzpOIx0okYqUSMmBmBc5QDKAcB5cBRChzFsgu/HoED58CMmIFhxGOQSsTIVP6u0ok4fak4A6kE/enwv4m4kS2EX+/FQplsoUSuGFT/HvKlYNXnWO8o8NGBFHu3DLJ3bIDzNvcRjxnz+RIvTi1w8MQih2cWSSdijA6mGR9IMTqYIm7G1HyBqfk8J+bznMwWSSVi9CXj4Usqzq7Rfq6Y2NzS93k7urlp6J3A3WZ2JbAVuA6ohrmZ3QbcBjAxMdHFD3t6BJVvrFSi9V9evv7UEe569CB/85GricfqB18vc85x8x8+yAuTC6ve9ujVJ/id972WRE3YPvL8FL/yt0/igP926xu44dVb6z7vo8+f4N5nj/Ho8yf4wdG5uh8/ETM+9rYL+LfveNWqjwOQK5b5yveP8K3npnjk+SmOzeZXvD0eMy7cMsD2kT5enlnk0EyWQilo+Pku/9mkY/1bl4gZpWDjfcHW+n5IxWMMZhLVwmU93nf5zp4P863Ad4FPEYb2u5e/0Tl3J3AnwL59+7r2nXB8LsfJxSIXbhnsaoDu/+J3+cYzx7j5km289/KdvO1V46QT8TX/zEM/muKR50/w5EszvGnPaNfGcqbkSwEvTC7wE5dt55YrzmfHSIbtIxn+7KEf88cPPs8rJ7P80QeuYCCdwDnHXz/yIr/31We5YHyAeMz4pb/6Dr9208X86o0XE6v8Xfzo2By/+5VneOhHU6QTMd60Z5Rff9dO3nzhGBduHeT4bJ4jp8Lq7sAL03zuged58uAM//39b2TrcKY6tod+NMlv/sNTvHhikbGBFNdeOMabLxzn2gvHcM7x7JE5nj0yy7NHZjk2l+PirUPcfMk2Jsb6mRjtZ3N/qlLVJhjMJFb9XTrnKJQDcsWAfKWKns+XOLVY5FQ2fMkVy4wOptkymGbLUJrxwRTFsuPkYoGTleo2VyxXK7BMMk4mGVZl
mUpllknGMYNSEFafhXJYkSZiMVLxGIm4kYhbtZo2AzOjHDjm82EFPp8L/5tOxKqfz2A6QSoew1U+F0dUcYb/DRyUA0e2UGY2F34+s9kihVLASOW3ik39SYb7kqTiMWJG9ePP5Uu8PJPl0PQih2eyHJ/LMz6Y4vzNfezc1Md5m/pwEFbgs3mOz+WYy5XYMpRm61CmWrHHzSiUAwql8PMOAkc8ZksvFn7uiVjl6xCz6m+DbtnnUCiHf0f5UlD9jWexUGYhX2KxUKZYDqp/B/2ppa99OhknU6nq632Na78fJufz/HhygRdPLPDjqUVOZQvsGu1nz9gAu8f62TXaT6EUML1Q4MR8gemFAmXnGB9IMT6UZmwgxab+FMXy0jizhTLp5No50inr1k1DZvZRIHoZAF7nnPuzeu+7b98+163t/Lf+yaM89uNpBlJxXn/+Jt4wsYk3XzjGdRfXPb6gZbf8j2/x46mwQp1ZLDKUSfBTr9/J77zvtQ2r9Y/+9Xe479nj3Pa2C/iNn7xkXR//bDg+m+Oq/3Q/v/czl/Gvrtm94m1/+9hBfvMfnuKSHcP88Qev5LP3/4i/e+Iw77h0G3946xuIm/HJf/gXvvTky1z/qi38zvtey1898iJ/c+Ag/ak4n7j5VfzC1RMr2iD1fOnJw3zyfz/FQDrBZ9//Bi7aMsjvffVZvvy9V9gz1s9vv++1vO3iLdUfFiLnEjN7wjm3r97bulmZ3wu82zn3lJn9V+CuLj53QycXi7xm+xBX7R3lu4dO8qf/9AKfe+B5Dvz7m9g+kmn+BA1kiwFX7h7lcx+8gm89N8XnD7zEF779Ej+/73ze2OBXpKOzOQDue+ZYT4b5d16cZiCV4NKdw3XfPpsrATCcWf1t8QtX72bnSB+/8j+f5PpPf5PAwa/edDGfuGmpCv/Mz13Ovt2j/PY9T3PD7Q8QM3j/VRPsf8erGBtMtzTGW644n8vOG+GXP/8EH/yzx+hPJSiUAj5x88X86+svbPrDQORc1bUwd84drKxkeQz4snPu6W4991qKQcClO4b53Z++DIB7nznGx+56nGOzuXWFeb5YJpOMkYzHuOHVW9k6lOG+Z49x5FSONzb4M0dP5ehLxnlhaoHnjs9z0dbBjj9+twWB45c//wSXn7+JP//FN9V9n9lcEYDhTLLu29/+mq188ePX8h+++gwfvnYPP/G6HSvebmZ84OoJLjtvmC98+xAfunY3l+yo/4NjLa/aNsQ9/+at/PY9TzOzWOCT77mUveMDbT+PyLmkq6cmOuduB27v5nM2UywHpJZNlo0PhisqTizkG/2RlkS9z8jOTeEPhldOZuu+f6EUMDVf4NZ9u/hfjx/i3meOnbYwL5UDfvZzj/CrN13MTZdsa+nPfPfwSabmC8wsNp7AmYsq877G3xaXnTfC3bddu+bHev35m3j9+ZtaGlcjA+kEn/65y9f1HCLnEu83DRVLjkR8qX86Xvl1fmp+fbPO2WJ5xa/0I31J+pJxjpzK1X3/43Ph42+c2MRl5w1z37PH1vXx13JiocD3Dp/ioR9Ntfxn7q+MJ2ql1DObDSvzoQaVuYj0Lv/DvBysWJs8FlXm6wzzXDGgL7UU5mbGjk0ZjpyqX5kfrYT8tpEM77hkO0++NMPk3Pp+O2gkWh4VTdC24v5njwNLgV1PtTJXmIt4Z8OFeX8qQV8yzon5zoPUORdW5jWrVnaO9DWszKPJzx0jGW6+dCvOwTd/cLzjMaxlphLmL55oLcwPzyzyg6NzDKTinFojzKOe+VCdCVAR6W0bIMxXb+wZG0xxYh2L+/OVjSa160F3jGQ4crJBmFdCfvtwhkt3DHPepj6+8czpabVMV/reh2eyFMuNN8VEoqr8J163o7o2t565XJF4ZZegiPhlA4R5QKJmzfHYYJqpdVTm+WIYkH21Yb6pj+NzOUp1AvToqVy4tbsviZlx8yVbefi5SbKF+sG5HlFlXg4ch2fqt32Wu+/ZY1ywZYDLzx8B
ltoptWazJYYyiYbb9kWkd3kd5tGW+9rzPMYHUuvqmWcrlWvtmuadIxkCB8fq9MKPzubYMZKpBuE7Lt1Orhjw8HOtT1K2anphqVXyYpO++Xy+xGMvTHPzJdsY7gt74VE7pdZcrqh+uYinvA7zYhBWyPXbLJ1X5lEboi+18nl3bOoD4Eid5YnHZnNsW7b9/OoLRhnKJLivTqvlxHyef3z6KP/hK8/w03/0MFf83r1NQ3m5mcVC9eiCZpOgD/1wkkI54KbXbK0GdaO++WyupH65iKe8/pdbKodHESTjq9ssJ+YLOOc6ahlUK/PE6soc4JU6k6BHTuW4cvfSztBos9H9PzhGOXD88Ngc//j0Ub7x9DGeOTILhD+EXn/eCNMLBb7xzFFue9uFLY1veqHA+Zv7mJ4vNJ0Eve/Z44z0Jbly92a+d/gk0HhFiypzEX95HebR5F8iVlOZD6QoBY7ZbImR/vbDKdegzdKoMnfOcXw2v2rH6Tsu3caXv/cKb/4v93NsNo8ZXDmxmV9/16u5au8orz9/hHQizs1/8CAPP3ei5TCfWSwwOhAeHPXiicWG71cOHN/8f8d5+6u3kIiH/XxovNZ8Nlti91h/S2MQkd7idZgXKmGerGmzRBuHTizkOwzz8Hlrwzw6T7p2eeL0QoFCOWD78Mowf/urt/Ca7UNsH8nwiZu3c9Ml4bEAtd560Th3f+clcjUblRqZXiiwfThDXyrO9w+favh+3z00w/RCobpLNKq616zM+1SZi/jI7555pc2SWtVmibb0dzYJulSZr/7y7BzpW7Wl/8iyZYnLDWWSfP0Tb+Ovfukq3n/VRN0gB7ju4nFyxYAnD860NL6ZhQKbB1LsHR/g8MxiwzO773v2OImYcf2rwxMko6BWz1xk4/E6zKMlgrWrWaIbbzrdOLQ0Abq6Sg53ga6szI9VNgx1erDX1ReMkYgZD7W48mW60mbZMzZA4ODQTP1Wy/3PHuOqvaPVijyTjJNKxOquZonOy1bPXMRPXod5sUGYr/d8lkYToAA7RvpWbemvVuYdhvlgOsEbJzbxrRbCPLyGLGBTf5I9lZME662EOTS9yA+Pza86iGs4k2Q2u7pnPl/po6syF/GT12FeKNVfzbK5f33nszTqmUO4omVqvkC+tLQZ6NhsjpjBlhbP7K7nrRdt4V9ePlXdENRItPtztD9VPRa23iRo9IPh+leNr3h8uC9Rt2dePf5WPXMRL3kd5o0q81RlJ2ana82rbZY6YR6taDm6rNVy9FSOLUPpuvdWtuqtF4/hHDzy/Ik13y8K+80D4SXCQ5lE3cr8wAsn2DKU5sItK4/hHc4k67ZZ5ta4mEJEet+GDHOobBxaZ5slXWcCdEe01nzZGS1HZ3OrJj/bdfn5mxhKJ3j4uck13y86MXF0IIWZsXd8YNVac+ccj75wgmsuGFu1zn6kL7l2Za6euYiXPA/zqM2y+tMYH+j8fJZ8sYwZpOvc9RmF+fK++dFT67vVCCARj3HNhWNNt/9Hl0tEraQ9YwOrdoG+eGKRY7N5rrlg9aXSw33JuuvM56o9c4W5iI88D/OoMl+9y3M9JyeGx9/G6+4e3TFS2Th0qruVOYTrzQ9NZzm4xq7O5ZU5wJ7xAV45mV3Rwz/wQtiqueaCsVV/fjiTqLs0MarW17plSER61wYJ80Ztlk575kHdNeYQLlfc3J+srjVfyJeYy5XYts7KHOCtF4eTlWvdIDSzUMCM6m7OveP94fLE6aXfFB59/gRbh9JcUOfezKjN4pxb8fhcTrcMifhs44b5QJqZxWLd42qbqb3/s9aOZZdULL+UYr0uGB9g50iGh9cI8+nFApv6ktWDtnaPrVye6JzjQIN+OYRtllLgqvMCkVktTRTxmudhXtkBmlgdWtHFzjOLjW/WaaT2/s9aOzdlqpX5sei6uC60WcyMt1w0ziPPT1EOXN33mVkosrnSYgHYG4V5pTXzwtQCx+fydVssQMOTE+dyRfqS8bo/GEWk
93n9L3ftNsvS+SztCtssrVXmjbbyd+qtF48zmyvxLy/XP3NleqHAaP9SmG8eSDHSl6xOgkb98msvbBDmlZ547cah2WxJ/XIRj22IMK+3vntsoPONQ+GBV42/NDs2ZTiVLbJYKFXbLOtdzRJ5y0Vh3/zhH9VfojizWFhRmUM4CXqwsnHowAvTbBtOs6fB6YcjDS6omMsX1S8X8ZjXYV5ocJ45LB221cnyxGanF+6srGh55WSOY7M5hjMJ+lPdqWrHB9NcuGWg4WmItZU5wN6xfn48tRCuL3++cb8cGp+cOJstacOQiMe8DvNi5bTAVIMJUOiwMi81mwBdWmt+pAtrzGvtGRvgpenVW/Sdcw0r81dOZXnmyCxT83mubdAvh8YnJ87lVJmL+MzrMC8FjXvmI5UVH530zLOFZhOg0SUVYWW+vVKpd8uu0X4OTS+uWj44ny9RLDtGB1aG7p6xAZyDv3v8MFB/fXmk2maprcxzJZ3LIuIxr8M8Ws2SqNNmicWM0Q4vdm42AbptOIMZvHIqG+7+HO78gK16Jkb7WSiUqxuEIjOVi5w396+uzAG+9ORhtg9n1rwtKFp6WLsLNKzM1WYR8ZXXYR5dypCM1f80xgZSHR2D22wCNJWIMT6Y5tB0lsn5fNdWskQmRsMwrm21VE9MHKjtmYdhPpsrce2FjfvlEP4W05+Kr2qzhD1zVeYivvI6zIvlgETMiMXqh9f4YLrDpYnNr2/bOZLhe4dP4hxdb7NElXVtmFfPZakJ85H+JJsr1+PVO4+lVnim+VKY54plCuVAlbmIx7wO81Lg1tzk0snJic45cqVgzQlQCNeaP3d8HoDtI91ts5y/OQzzQ7VhvrB0lnmtqNWyVr88MtK38hhcnWUu4r+2w9zMEmb2a2Y2WfP4fjM7YGb7uze8tRVKQd1liZGxgXTb57MUy45y4NZss0C41jzSjd2fy/Wl4mwdSq9usyzUr8wBLts5wgXjA9UWzVrCCyqWeuY6y1zEf538600AjwHfjx4ws13AbufcNWb2FTO7xzn3XLcG2UixHDStzBcK5ZZvvYdwWSLUv2VouZ3LWis7utxmgbBvXq/NEo9Z3dD95HsuIVcsr9kvjwxnkitOfayemKieuYi32q7MnXM559wBYPm6uXcCd5vZlcBW4LoujW9NzcI8Op+lnaNwc4XWwjyqzFOJWLVf3U0To/0rTkIEmF4osrk/VTewM8k4m+q0X+qpbbPM6ZAtEe81DXMz+5CZ3bfspV4bZStQAD4F3AZsq30HM7vNzB43s8cnJ9e+TadVxbIjWeeQrcjSxqHWWy1r3f+5XFSNbx/OtFQNt2vXaD+vnMpWV+xA2DOvXWPeieGa24bUMxfxX9NSzDl3F3BXk3ebBO4APgoMAKvOcHXO3QncCbBv3776RwK2qZU2C7S3CzRqszSbAN1Zqcy7vSwxMjHaj3Pw8sls9eLm6cXCqjXmnRjOJJjLlwgCRyxmqsxFNoBurWa5FzjsnHsKuAV4tEvPu6ZiOWi4xhzCpYnQ3vks2WqbZe0vzdahDPGYdeVSinomKssTl986FFbmXQjzviTOwVw+DHH1zEX815Uwd84dBA6Y2WPAnHPu6W48bzPN2ixR8LXVMy+2VpnHY8bP79vFu167qqPUFdGqlOXLE+udy9KJ4Zot/XO5EvGY0Z9qbZJYRHpPx79XO+durnn9duD2dY+oDc3aLP2pOJlkrK2eeXQDT7qF1S//+ZbXtfy87doymCadiFVXtASBY2axWHeNebuqJydWeuWzla38p6P3LyJnhtebhpqFuZlV1pq3U5lHE6Bn90sTi9mK5YlzuRLlwHWpMg9/hp9aVpmrXy7iN8/D3NU9/na58cEUU220WfItToCeCWGYh8sTl85lWX9fe+nkxKWeufrlIn7zPMyDuicmLjc22N4u0GyL68zPhOVH4VZ3f56GNosqcxH/eR3m4Xb+tT+FsTaPwW11AvRMmBjtZz5fYmaxWD2XpSth
XjMBOptTZS7iO6/DvBQ0b7OMDaaZXiisuuihkWyLm4bOhOVH4TY6/rYTQ+kEZitXs+iWIRG/eR3m4QTo2m2W8cEUhXJQXVPdTFSZpxNn/0szsewo3Jk1DtlqVyxmDKUT1QsqZrPF6qSoiPjp7CfWOhRLAYmmlXl7u0BzpTLpRKzhGeln0q5lR+FOLxZIxWMMdGkteLSlPwgc8wVV5iK+8zrMC+W1zzOH9s9nyTW5//NM6kvF2TKU5qUTYWW+eSDZtbXgw5kkp7JF5vIlnNPxtyK+8/pfcLEckGq6miWszFu9Pi5XbH4xxZkUrTUfSCe6MvkZiU5OnMtpK7/IRuB1ZV5qsmkIls5nafX6uGyT+z/PtCjMZxa7cy5LJLqgIlprrp65iN96J7U6EJ7NsvanEFWzU3OtVua902aBMMxfOZXl2GyuK5OfkeHMyspcPXMRv3kb5s45CuWAZJOJylQixmA6seo2+kZypaDnwtw5ODyT7cq5LJHhvrBnPlu9Mk5hLuIzb8O8FITrxpu1WQAG0wnm8y2GeaHH2ixjS3d6drMyH+lLslgoV5c8ageoiN96J7XaVCpXwryF9eBDmUT1AoZmcqVyz02ARka7eD1dtHrl8Mnw7BfdMiTiN2/DvFAOd2q2Upm3E+bZHlqaCEtH4UJ3K/MovA/PhKcyqjIX8Zu3YV6shnnzddeDmWR1oq+ZXKm3wjwWM3ZVqvNurmYZqYZ5lr5kvKUfiiLSu7z9F1xstzJveTt/b02AwlKrpZvrzKPK/OWZrKpykQ3A3zAvtT4BOtxOz7zHJkBhKcy7us68snrl6GxO/XKRDaC3UqsNxaCNNks60VabpZcmQAH27dnM9uFMdTdrN0SbhMqBU2UusgF4+684arM0OwIXwg0xuWLQ9Jq5UjmgWHY912b5qdfv5Kdev7OrzzmyrBrXGnMR//lbmVfaLM1OTYSllRrzTVotuVJv3P95JvQl4yQqG65UmYv4z9vUKrSzmiUdhlWzvnkv3TJ0uplZtVeunrmI/7wN81KbbRaAuSa7QKP7P9PnQJjDUqtFlbmI/7wN82IbO0Cj3Y7NKvN86dypzGHp66KeuYj/PA7zsDJPtHAjULUybxLm2ULv3P95JlTbLKrMRbznbZi3s51/sFqZr91myVUq83NhAhSWKnL1zEX8521qVZcmtnjQFsB8k12g59IEKCyFuHrmIv7zNsyrpya2sTSxeZslqszPlTBXz1xko/A2zNtZmphOxEnFY8w2bbOcYz3zTFSZK8xFfOdtmLdz0BaE1XnTTUOFc6tnvnUojVl3z3wRkbPD22ZpsdR+mDfdNFQ6t9os7718JxdsGWDLUPpsD0VE1qmjEtTMPmtmD5rZXyx7bL+ZHTCz/d0bXmNL18Y1b7NAuKKl6WqWc2wCNJOMc+Xu0bM9DBHpgrbD3MwGgL93zl0PHDOz68xsF7DbOXcNcKOZXdTtgdZqZ2kiwFA62XQ1y7m2zlxENo62w9w5t+Cc+6fKq9NAHngncLeZXQlsBa6r/XNmdpuZPW5mj09OTq5nzEB755lD622WVDxGvIWNSCIivaRpEprZh8zsvmUv+yuPXwxc4Zz7NmGAF4BPAbcB22qfxzl3p3Nun3Nu35YtW9Y98GI5IGa0HLxDmWRLSxPT58jkp4hsLE0nQJ1zdwF3LX/MzHYCvw98uPLQJHAH8FFgAJjq7jBXa3Y2ea2hTKLp0sR8j93/KSLSqk7L0M8AH3fOzVZevxc47Jx7CrgFeLQbg1tLsexaOjExMpRJMJ8v4Zxr+D65YnDOTH6KyMbSyQToVcANwBfN7AEze59z7iBwwMweA+acc093eZyrFMtBSycmRoYyCZyDhcpa8nqyPXj/p4hIK9peZ17pke+o8/jtwO3dGFQriuWgpRMTI4Pp6OTEYvWyilq9eP+niEgrvC1DCx30zGHtq+PCCVCFuYj4x9swL5VdSycmRqIwn10jzHOlQBOgIuIlb8M8XM3SeptlqIUz
zfPFMn3qmYuIh7xNrrBn3k5l3vy2oWxRSxNFxE/ehnmh7NpezQJrX1CRK2oCVET85G2Yl8oBqbbaLEurWRoJlyYqzEXEP96Gebs7QPuTcczWbrPkSoG284uIl7xNrkLZkWgjzGMxYzDd+LCtIHAUStoBKiJ+8jbMi6X22iwQXpPWKMzPtYspRGRj8TfM22yzAJXKvH7PPFcMzzJXZS4iPvI2zEuBazvMo8O26skWz637P0VkY/E2uQql9ivztS6oyBXVZhERf3kb5u3uAAUYzCTXaLMozEXEX56HuSpzERHwOMxL5c565nMNeuaaABURn3kb5oVyQDLRXptlKJ2gUArIl1ZfUJEtaAJURPzlbXIVywHJNg7agrUP29I6cxHxmZdhXg4cgaOjNgvUv6BCbRYR8ZmXYV4sh8Hbdptljco8Wmeus1lExEdeJlcU5qkOdoBC/ZMT85UwV2UuIj7yNMwd0Hmbpd6KlqUJUIW5iPjH0zAPK/NEBwdtQeMJ0HjM2v4BISLSC7xMrkKp0jNvt82yxj2guaKOvxURf3kZ5qUgbLO02zNfutS5/gSo1piLiK+8TK/qapY2wzwZj5FJxuqenJjTZc4i4jEvwzxqs7TbMwcYTNc/bEthLiI+8zLMO12aCDCcSTDbYNOQ2iwi4isv06vTpYlQuaCibpiXNQEqIt7yMsxL1Z55+22WoQZnmmfVZhERj3kZ5oXqdv72hx/eA9qozaIwFxE/dRTmZvY5M/ummd2+7LFbzewxM/t094ZXX7XN0uapidD4HlBNgIqIzzqtzH/LOfd2YNTMLjCzfuC9zrmrgUEzu7F7Q1yt04O2IGqzNAjzDip9EZFe0FF6OeeOm1kfsA04BVwLfNXMJoDXANd3b4irdbrOHMJdoPP5EuXKxqNIrlimL6XKXET81DQNzexDZnbfspf9ldD+IfCSc+4EsJUw1P8A+DBhyNc+z21m9riZPT45ObmuQUdtlk6XJgKrWi2aABURnyWavYNz7i7grtrHK4F+h5m9BZgEPg38OyAPTNV5njuBOwH27dvnat/ejk4P2oJlF1TkS4z0JaOxaQJURLzWdmlroQnnnAPmgEHgUWAGuA+4BXikq6Ossa42Szo6OXFpeWK+sqNUm4ZExFedpNcI8Fkz+yYwDtzrnFsA/hT4DnAp8LXuDXG1Tk9NhPqHbeUqF1NkEqrMRcRPTdsstZxzJ4GfqfP4F4AvrH9IzXV6aiLUvwe0ev+nJkBFxFNe9hWKpfXtAAWYXdZmOZktALoyTkT85WeYV3rm8VjnE6DL2yxfevJl4jHjTXtHuzNAEZEzzMswL5QdqXgMs/WH+UK+xBe+/RLvvmw7523q6+o4RUTOFC/DvFQOOmqxQNhKiceM+XzYZvm7xw8xlyvxkbfu7eYQRUTOKC/DvFgOOjpkC8DMqodtlQPHXz7yIm+c2MQVE5u7PEoRkTPHyzAvlB2JDg7ZigxlwjC//9ljHDyxqKpcRLznZZgXywGpDtsssHTY1p8//GPO29THu1+7vYujExE589peZ94LSutoswAMpRN899BJpubz/MZPvoZEB+vVRUR6iZcpViy7jnZ/RoYyCabm8/Sn4tz6pokujkxE5OzwMswL5WDdYQ7w8/t2VQ/bEhHxmZdhXlzH0kSAkb4kZvBLb9nTvUGJiJxFXvbMi+uszH/xLXu59sIxdo8NdHFUIiJnj6dh7tZVme8dH2DvuIJcRDYOj9ssXg5dROS08DIRFeYiIit5mYjF0vraLCIiG42fYR6oMhcRWc7LRAy383s5dBGR08LLRAzbLF4OXUTktPAyEYvlgIR65iIiVV6G+Xq384uIbDReJmKp7Eit49REEZGNxstEXO/ZLCIiG413YR4EjlKwvpuGREQ2Gu8SsRgEAGqziIgs410ilsoOQG0WEZFlvAvzYjmszLWaRURkiXeJWFCYi4is4l0iFtVmERFZxb8wL6kyFxGp1XEimtnlZva1Za/vN7MDZra/
O0OrrxQozEVEanWUiGYWAz4CJCuv7wJ2O+euAW40s4u6N8SVCqWozaIwFxGJdJqIHwP+ctnr7wTuNrMrga3AdesdWCNLq1nUMxcRiTS90NnMPgR8aNlD/xfIOOf+2awaqFuB7wKfAm4D3l3neW6rvI2JiYmOB6yliSIiqzVNROfcXc65m6MX4BTwDjN7ALjSzH4ZmATuAD4JpIGpOs9zp3Nun3Nu35YtWzoe8NJqFoW5iEik7UR0zt3hnLvWOXcD8IRz7nPAvcBh59xTwC3Ao90d5pKoMk8l1GYREYl0pbx1zh0EDpjZY8Ccc+7pbjxvPVGY66AtEZElTXvma6m0XaL/vx24fd0jakI9cxGR1bxLxEKlZ642i4jIEu/CvKTKXERkFe8SUW0WEZHVvEvEqM2S0KYhEZEq78I8OmgrpcpcRKTKu0TUQVsiIqt5l4jaASoispp3iVgo6aAtEZFa3oV5sRyQiBnLDvkSETnneRnmarGIiKzkXSoWy04tFhGRGh6GeUAq4d2wRUROK+9SMeyZezdsEZHTyrtULJYdSR2yJSKygodhrglQEZFa3qVisRxoK7+ISA3vUrFYdjpkS0SkhodhrjaLiEgt71JRYS4ispp3qVgsO/XMRURqeJeKYWWunrmIyHLehXmhFJBQZS4isoJ3qailiSIiq3mXiqVAB22JiNTyLsyLJa1mERGp5V0qFspOPXMRkRrepWLYM1ebRURkOe/CvKRNQyIiq3iXiuERuN4NW0TktPIqFZ1zFFSZi4is4lUqlgIHQDKmnrmIyHIdhbmZHTWzByovV1Ye229mB8xsf3eHuKRYDgDUZhERqdFpKn7dOXdD5eUJM9sF7HbOXQPcaGYXdXGMVcVypTJXm0VEZIVOU/FtZvaQmX3GzAx4J3B3pUrfClzXtREuE1XmWpooIrJS0zA3sw+Z2X3LXvYDn3DOXQckgJ8kDPAC8CngNmBbnee5zcweN7PHJycnOxpsFObaNCQislKi2Ts45+4C7mrw5q8CbwAmgTuAjwIDwFSd57kTuBNg3759rpPBFktqs4iI1NN2KprZqJndWHl1H/AccC9w2Dn3FHAL8Gj3hrikGFQmQNVmERFZoZMSdxb4gJk9CFwC/B/n3EHggJk9Bsw5557u5iAjSz1zVeYiIss1bbPUcs6VCNsptY/fDtzejUE1ojaLiEh9XqXiYCbBe163g+0jmbM9FBGRntJ2ZX427R0f4I5fuOJsD0NEpOd4VZmLiEh9CnMRkQ1AYS4isgEozEVENgCFuYjIBqAwFxHZABTmIiIbgMJcRGQDMOc6OsBwfR/UbBI4uI6nGKfOyYw9TOM9vTTe00vjPb3aGe9u59yWem84K2G+Xmb2uHNu39keR6s03tNL4z29NN7Tq1vjVZtFRGQDUJiLiGwAvob5nWd7AG3SeE8vjff00nhPr66M18ueuYiIrORrZS4iIssozEVENgDvwtzM9pvZATPbf7bHUo+ZJczs1ypr6aPHen3MnzWzB83sLyqv9/R4Aczsc2b2TTO7vfL6rWb2mJl9+myPrR4zu9zMvlb5fx++vkfN7IHKy5W9PmYz+2Dle/geM+vr5fGa2cSyr+0hM/uZbozXqzA3s12Ei+avAW40s4vO9pjqSACPAd+H3h+zmQ0Af++cux44ZmbX0cPjXea3nHNvB0bN7FLgvc65q4FBM7vxLI9tBTOLAR8Bkr3+/bDM151zNzjnbgCO08NjNrMM8F7g7cAthJtwena8zrmXln1tnwKeoAvj9SrMgXcCd5vZlcBW4LqzPJ5VnHM559wBIJpZ7ukxO+cWnHP/VHl1GngzPTzeiHPuuJn1AduAS4CvmtkE8Brg+rM6uNU+Bvxl5f97+vthmbeZ2UNm9hl6f8xXA1ngH4FP0fvjBcDMLgBepkvj9S3MtwIFwr+w2wj/Ifc6L8ZsZhcDVxB+T/gw3gngh8BLQAo4BfwB8GF6aMxmth04zzn3z5WHvPh+AD7hnLuO
8DfN7fT2mHcAA8C7gL348zW+FbibLo3XtzCfBO4APgmk8eP8hZ4fs5ntBH4f+DgejBfCX1WBCcLfgF4F/EfgT4A8vTXmnwXeYWYPAFcC8/jx9b2n8r9fBYr09pgXgAedcwHwIBDQ2+ONvBN4gC79m/MtzO8FDjvnniLsjT16lsfTCh/G/Bng4865WTwYr4UmXLhJYg74LjAD3Ec45kfO4vBWcM7d4Zy7ttIffQK4h97/+o4um3fYBxyjt8f8BGEwArwWOEBvjzdqsRx1zpXo0r85r8LcOXcQOGBmjwFzzrmnz/aYmun1MZvZVcANwBcr1ePl9PB4K0aAz5rZNwknu74M/CnwHeBS4GtncWxr6vXvh4pZ4ANm9iDhfMTn6eExO+deAb5jZo8ASefcg/TweCt+mvD7tmvfE9oBKiKyAXhVmYuISH0KcxGRDUBhLiKyASjMRUQ2AIW5iMgGoDAXEdkAFOYiIhvA/wcvthzl6qMzpgAAAABJRU5ErkJggg==",
+ "text/plain": [
+ "
In this tutorial, the problem of finding a stable interface structure for Cu is solved as an example. The values that have already been evaluated are used, although the evaluation of the objective function, i.e., the structural relaxation calculation, actually takes on the order of several hours per calculation. For more information on the problem setup, please refer to the following references
+
+
+
S. Kiyohara, H. Oda, K. Tsuda and T. Mizoguchi, “Acceleration of stable interface structure searching using a kriging approach”, Jpn. J. Appl. Phys. 55, 045502 (2016).
In the following, N is defined as the number of search candidates and d is defined as the dimensionality of the input parameters.
+
+
X is an N x d matrix, where each row represents a parameter set (a d-dimensional vector) for each candidate.
+
t is a vector of N dimensions, corresponding to the negative energy of each candidate (the value of the objective function to be optimized). Normally, when we perform Bayesian optimization, we start with only X given and t does not exist. Therefore, in actual use, the value of t can only be obtained by receiving a candidate Bayesian optimization proposal and evaluating it with a simulator. Since this is a tutorial, we will skip the calculations and give t in advance.
+
+
PHYSBO assumes that the direction of optimization is “maximization”.
+
Therefore, the original problem setting is “energy minimization”, but when optimizing with PHYSBO, the objective function value is multiplied by a negative value and treated as a “negative energy maximization” problem.
Next, set test_X to the matrix of search candidates (numpy.array).
+
+
[9]:
+
+
+
# set policy
+policy=physbo.search.discrete.policy(test_X=X)
+
+# set seed
+policy.set_seed(0)
+
+
+
+
When policy is set, no optimization is done yet. Execute the following methods on policy to optimize it.
+
+
random_search.
+
bayes_search.
+
+
If you specify the simulator and the number of search steps in these methods, the following loop will be executed for the number of search steps.
+
+
Select the next parameter to be executed from the candidate parameters.
+
Execute simulator with the selected parameters.
+
+
The default number of parameters returned by i) is one, but it is possible to return multiple parameters in one step. See the section “Searching for multiple candidates at once” for details.
+
Also, instead of running the above loop inside PHYSBO, it is possible to control i) and ii) separately from the outside. In other words, it is possible to propose the next parameter to be executed from PHYSBO, evaluate its objective function value in some way outside PHYSBO (e.g., by experiment rather than numerical calculation), propose it in some way outside PHYSBO, and register the evaluated value in PHYSBO. For more details, please refer to the “Running Interactively” section of the tutorial.
Since Bayesian optimization requires at least two objective function values to be obtained (the initial number of data required depends on the problem to be optimized and the dimension d of the parameters), we will first perform a random search.
+
argument.
+
+
max_num_probes: Number of search steps.
+
simulator: The simulator of the objective function (an object of class simulator).
When executed, the objective function value and its action ID for each step, and the best value up to now and its action ID will be printed as follows.
+
0020-th step: f(x) = -1.048733 (action=1022)
+ current best f(x) = -0.963795 (best action=5734)
+
Next, we run the Bayesian optimization as follows.
+
argument.
+
+
max_num_probes: Number of search steps.
+
simulator: The simulator of the objective function (an object of class simulator).
+
score: The type of acquisition function. You can specify one of the following
+
+
TS (Thompson Sampling)
+
EI (Expected Improvement)
+
PI (Probability of Improvement)
+
+
+
interval:
+The hyperparameters are trained at the specified interval.
+If a negative value is specified, no hyperparameter will be learned.
+0 means that hyperparameter learning will be performed only in the first step.
+
num_rand_basis: Number of basis functions. 0 means that a normal Gaussian process without Bayesian linear model will be used.
The search result res is returned as an object of the history class (physbo.search.discrete.results.history).
+
The following is a reference to the search results.
+
+
+
res.fx : The history of evaluated values of simulator (objective function).
+
res.chosen_actions: The history of action IDs (parameters) when the simulator was evaluated.
+
fbest,best_action=res.export_all_sequence_best_fx(): The history of best values and their action IDs (parameters) for all timings when the simulator was evaluated.
+
res.total_num_search: Total number of simulator evaluations.
+
+
+
Let’s plot the objective function value and the best value at each step.
+
res.fx and best_fx should range up to res.total_num_search, respectively.
+
+
+
+
\ No newline at end of file
diff --git a/manual/v2.0.2/en/notebook/tutorial_basic_org.ipynb b/manual/v2.0.2/en/notebook/tutorial_basic_org.ipynb
new file mode 100644
index 00000000..26238c42
--- /dev/null
+++ b/manual/v2.0.2/en/notebook/tutorial_basic_org.ipynb
@@ -0,0 +1,565 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Basic usage of PHYSBO\n",
+ "\n",
+ "## Introduction\n",
+ "\n",
+ "In this tutorial, the problem of finding a stable interface structure for Cu is solved as an example. The values that have already been evaluated are used, although the evaluation of the objective function, i.e., the structural relaxation calculation, actually takes on the order of several hours per calculation. For more information on the problem setup, please refer to the following references\n",
+ "\n",
+ "- S. Kiyohara, H. Oda, K. Tsuda and T. Mizoguchi, “Acceleration of stable interface structure searching using a kriging approach”, Jpn. J. Appl. Phys. 55, 045502 (2016).\n",
+ "\n",
+ "---\n",
+ "\n",
+ "Let's try each step using the sample data.\n",
+ "\n",
+ "First, we will import PHYSBO."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:02:05.943971Z",
+ "start_time": "2020-12-04T06:02:05.507138Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "import physbo"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Preparation of search candidate data\n",
+ "\n",
+ "First, load the data."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:02:05.950047Z",
+ "start_time": "2020-12-04T06:02:05.945622Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "\n",
+ "def load_data():\n",
+ " A = np.asarray(np.loadtxt('data/s5-210.csv',skiprows=1, delimiter=',') )\n",
+ " X = A[:,0:3]\n",
+ " t = -A[:,3]\n",
+ " return X, t"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:02:06.105894Z",
+ "start_time": "2020-12-04T06:02:05.961463Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "X, t = load_data()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In the following, N is defined as the number of search candidates and d is defined as the dimensionality of the input parameters.\n",
+ "\n",
+ "X is an N x d matrix, where each row represents a parameter set (a d-dimensional vector) for each candidate. \n",
+ "t is a vector of N dimensions, corresponding to the negative energy of each candidate (the value of the objective function to be optimized).\n",
+ "Normally, when we perform Bayesian optimization, we start with only X given and t does not exist. Therefore, in actual use, the value of t can only be obtained by receiving a candidate Bayesian optimization proposal and evaluating it with a simulator. Since this is a tutorial, we will skip the calculations and give t in advance.\n",
+ "\n",
+ "**PHYSBO assumes that the direction of optimization is \"maximization\".** \n",
+ "\n",
+ "Therefore, the original problem setting is \"energy minimization\", but when optimizing with PHYSBO, the objective function value is multiplied by a negative value and treated as a \"negative energy maximization\" problem."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:02:06.115603Z",
+ "start_time": "2020-12-04T06:02:06.107365Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([[0. , 1. , 0. ],\n",
+ " [0. , 1. , 0.1],\n",
+ " [0. , 1. , 0.2],\n",
+ " ...,\n",
+ " [8. , 1.5, 3.4],\n",
+ " [8. , 1.5, 3.5],\n",
+ " [8. , 1.5, 3.6]])"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "X"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:02:06.120704Z",
+ "start_time": "2020-12-04T06:02:06.116918Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([-1.01301176, -1.01487066, -1.02044168, ..., -1.11680203,\n",
+ " -2.48876352, -2.4971452 ])"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "t"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To scale the search parameters, standardize each column of X so that the mean is 0 and the variance is 1, respectively."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:02:06.127129Z",
+ "start_time": "2020-12-04T06:02:06.121967Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "X = physbo.misc.centering( X )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:02:06.132603Z",
+ "start_time": "2020-12-04T06:02:06.129255Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([[-1.71079785, -1.46385011, -1.68585446],\n",
+ " [-1.71079785, -1.46385011, -1.59219588],\n",
+ " [-1.71079785, -1.46385011, -1.4985373 ],\n",
+ " ...,\n",
+ " [ 1.71079785, 1.46385011, 1.4985373 ],\n",
+ " [ 1.71079785, 1.46385011, 1.59219588],\n",
+ " [ 1.71079785, 1.46385011, 1.68585446]])"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "X"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Definition of simulator\n",
+ "\n",
+ "Define the simulator class called in PHYSBO. \n",
+ "The return value of the `__call__` method is the value of the objective function when action is given. \n",
+    "action is the ID of the search candidate (0, 1, ..., N-1).\n",
+ "\n",
+ "In this tutorial, we have defined a simulator that only returns the already computed value of t when action is given. \n",
+ "Please customize the simulator class if you want to apply it to other problems."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:02:06.136850Z",
+ "start_time": "2020-12-04T06:02:06.134076Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "class simulator:\n",
+ " def __init__( self ):\n",
+ " _, self.t = load_data()\n",
+ " \n",
+ " def __call__( self, action ):\n",
+ " return self.t[action]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Performing optimization"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Setting policy\n",
+ "\n",
+ "First, set the optimization `policy`. \n",
+ "\n",
+ "Next, set `test_X` to the matrix of search candidates (`numpy.array`)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:02:06.149593Z",
+ "start_time": "2020-12-04T06:02:06.143075Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "# set policy \n",
+ "policy = physbo.search.discrete.policy(test_X=X)\n",
+ "\n",
+ "# set seed\n",
+ "policy.set_seed(0)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "When `policy` is set, no optimization is done yet.\n",
+ "Execute the following methods on `policy` to optimize it.\n",
+ "\n",
+ "- `random_search`. \n",
+ "- `bayes_search`.\n",
+ "\n",
+ "If you specify the `simulator` and the number of search steps in these methods, the following loop will be executed for the number of search steps.\n",
+ "\n",
+ "i) Select the next parameter to be executed from the candidate parameters.\n",
+ "\n",
+ "ii) Execute `simulator` with the selected parameters.\n",
+ "\n",
+    "The default number of parameters returned by i) is one, but it is possible to return multiple parameters in one step.\n",
+ "See the section \"Searching for multiple candidates at once\" for details. \n",
+ "\n",
+    "Also, instead of running the above loop inside PHYSBO, it is possible to control i) and ii) separately from the outside. In other words, it is possible to propose the next parameter to be executed from PHYSBO, evaluate its objective function value in some way outside PHYSBO (e.g., by experiment rather than numerical calculation), and register the evaluated value in PHYSBO. For more details, please refer to the \"Running Interactively\" section of the tutorial.\n",
+ "\n",
+ "### Random Search\n",
+ "\n",
+ "First of all, let's perform a random search.\n",
+ "\n",
+ "Since Bayesian optimization requires at least two objective function values to be obtained (the initial number of data required depends on the problem to be optimized and the dimension d of the parameters), we will first perform a random search. \n",
+ "\n",
+ "**argument**. \n",
+ "\n",
+ "- `max_num_probes`: Number of search steps. \n",
+ "- `simulator`: The simulator of the objective function (an object of class simulator). "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:02:06.380266Z",
+ "start_time": "2020-12-04T06:02:06.154735Z"
+ },
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "res = policy.random_search(max_num_probes=20, simulator=simulator())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "When executed, the objective function value and its action ID for each step, and the best value up to now and its action ID will be printed as follows.\n",
+ "\n",
+ "````\n",
+ "0020-th step: f(x) = -1.048733 (action=1022)\n",
+ " current best f(x) = -0.963795 (best action=5734) \n",
+ "````\n",
+ "\n",
+ "\n",
+ "### Bayesian Optimization\n",
+ "\n",
+ "Next, we run the Bayesian optimization as follows.\n",
+ "\n",
+ "**argument**. \n",
+ "\n",
+ "- `max_num_probes`: Number of search steps. \n",
+ "- `simulator`: The simulator of the objective function (an object of class simulator). \n",
+ "- `score`: The type of acquisition function. You can specify one of the following\n",
+ " - TS (Thompson Sampling) \n",
+ " - EI (Expected Improvement) \n",
+ " - PI (Probability of Improvement) \n",
+ "- `interval`: \n",
+ "The hyperparameters are trained at the specified interval. \n",
+ "If a negative value is specified, no hyperparameter will be learned. \n",
+ "0 means that hyperparameter learning will be performed only in the first step. \n",
+ "- `num_rand_basis`: Number of basis functions. 0 means that a normal Gaussian process without Bayesian linear model will be used. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:03:19.374318Z",
+ "start_time": "2020-12-04T06:02:06.382690Z"
+ },
+ "code_folding": [],
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "res = policy.bayes_search(max_num_probes=80, simulator=simulator(), score='TS', \n",
+ " interval=20, num_rand_basis=5000)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Checking the results\n",
+ "\n",
+ "The search result ``res`` is returned as an object of the ``history`` class (`physbo.search.discrete.results.history`). \n",
+ "The following is a reference to the search results.\n",
+ "\n",
+ "- `res.fx` : The history of evaluated values of simulator (objective function).\n",
+ "- `res.chosen_actions`: The history of action IDs (parameters) when the simulator was evaluated. \n",
+ "- `fbest, best_action= res.export_all_sequence_best_fx()`: The history of best values and their action IDs (parameters) for all timings when the simulator was evaluated.\n",
+ "- `res.total_num_search`: Total number of simulator evaluations.\n",
+ "\n",
+ "Let's plot the objective function value and the best value at each step. \n",
+ "`res.fx` and `best_fx` should range up to `res.total_num_search`, respectively."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:03:19.620065Z",
+ "start_time": "2020-12-04T06:03:19.375626Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "%matplotlib inline"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:03:19.774131Z",
+ "start_time": "2020-12-04T06:03:19.621947Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[]"
+ ]
+ },
+ "execution_count": 13,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX0AAAD3CAYAAADxJYRbAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/Il7ecAAAACXBIWXMAAAsTAAALEwEAmpwYAABCdElEQVR4nO2deXgc1ZX231PVrX21Fku2Zcv7vgsbDF4wYIITSAJZIYFAiAlZHSczX74szGSyswTCfDCEZEjiSYAQmLBvtrENAWwj492WbdnyrtWyrMVaernfH9VVXd1dvUhqdftWn9/z6IEuVavq+lade+57zz2HhBBgGIZhUgMl2TfAMAzDJA42+gzDMCkEG32GYZgUgo0+wzBMCsFGn2EYJoVwJPsGIlFcXCwqKyuTfRsMwzBSsX379hYhRInV7y5qo19ZWYnq6upk3wbDMIxUENHxcL9jeYdhGCaFYKPPMAyTQrDRZxiGSSHY6DMMw6QQbPQZhmFSCDb6DMMwKQQbfYZhmBSCjT6T0qzf34iTrReSfRsMkzAu6s1ZFzP/8/4xnG7rwfdWTIJDTZ2xs+F8D8ryM5J9G3HhdFs37lxbjVGFmXjh65ejKCd9yK/p9nht+7w0nO+By+PFiIJMqArhfLcL6/Y34rU99Wjq6IVTJThUBTfNG4nPXjI62bc7YM53u9DU3oOJw3NjOv9MWzceXHcI31g+AWOKsof47qJjz6cvATy2+Sge23wE33xqB3rdnpi/J4TAf206gmt+sxnnuvqG8A7jzzuHm3HpLzfg/SNnQ37X2tUHj1eugjyv720AADS19+Krf9mOPrd3SK/3wbFWzPz3N7F+f+OQXicZvLz7DBbf+xYW37sRU3/8OpY/sAlVP1uH7/19F2oaOlCck4bsdAeaO3rx4+f34XRbd9LuVQiBbzz5IZ7cemJA3//x83vxqcfehzeG510IgR8/vxd/334KN/9+a1LbrdNvo09EDiL6NhE1x3DuGiLaQkRrIh1LFJ29bnzjyQ8HPZ1v7ujF6bZuzB1dgNf2NuDOP1fjQp876vdcHi++/9we/Pr1Ghxu6sS2Y60Bv+/qdeMnL+3D2c7eQd3fUKG/JK/sORNw/GTrBVz6yw346MPvYMOBRiSyGpvHK7DjxDm0DODf7LU99Zhanof7Pj0LHxw7hx89vyfivfe4PHh62wm8VTMwo7316Fl0uzz4+pMfojqo72Xmya0n8M2ndmBORQF+deNM3H5FJSaV5uJLiyrx/Ncvxz//z5X44+0L8D9fXoi/3LkQAPDIxtqk3e/++na8vLse//biXuw/096v77Z09uK1vfU43+1CfXtP1PPX7W/EhpomfO6SCrT3uHDL77egKYbvDSUD8fQdALYC2B3pJCKqADBGCHEpgOVENMHq2ACuP2C2Hz+Hl3fX48ltAxvhdXaebAMA/GDlVNx70yy8W9uC7/xtZ8TvnDp3AV/64zb8rfokvrp0PJwqYceJtoBzNtQ04Y/vHsOf3w+bNiNptHb1Yf2BRhAB6/c3BXg5L+46gz63F119bnz5z9X4zO/ex4mzidHJH95wGJ989D1U/Ww95v90HW57YhvaLkSfQTWc70H18XNYOaMMH58zEt9cPgHPVJ/CM9UnQ8690OfGH945iiX3bsT3/3cP7nvj0IDu9UBDB4bnpWNkQSbu+NMHONjQMaC/M1jcHi8ON3bgYIP209QxcCP0u81H8IN/7MHSSSVYe8dCfG7BaPzf66bisS/Oxw8/Og1zKgpARMb5Iwsy8bkFFXjmg5MBztf7R87iL1uOJ8RheGlXPVSFUJCVhtV/24Eel3+m3tXrDvgczN+rT8Hl0e7xcGPk/rvQ58ZPXtqPycNz8dNPzMCf71iA5o5e3PKHrWjvccWnMQOg30ZfCNEjhNgCIFrvrADwNBHNB1AKYHGYYwmjtqkTgDatH8zDtetkG1SFMGNEPj5zSQW+eOkYbDrYDLcnUB7ocXnw
p3frcNN/vYcrfr0RW4+24t5PzcL3r5uCaSPysePEuYDzP6jTvL+/V59MuFQihIDLE17eeH7Habg8AquWjENDew/2nD5v/O6lXWcwb3QB3vruMvzsEzNQU9+Be17cO+T3XNvUgUc31eLqqaX48cem4dLxRdh8qBlbjkb3ot/Yp0k7180sBwB85+pJmDQ8By/tqg8591tP7cTPXjmA8SU5mD4iL+K/UyRq6tsxe1QB/nzHAmSmqbj1ia1DOqt7eMNhfOmP20KO/+dbtbjmwbdx7UPazxW/3hjTTDWYc119+NXrNbh2+nA8/sUqZKapMX3va8smQFEI//nWYQDa8/PF/96KHz2/F/e+cTDmd9PjFXj7UDO+9dQOLPzFevzi1QM4fyGyMRVC4OXdZ3D5hGI88OnZONTYiV+/XoMLfW78dv1hXPLz9Vj0q7fw3PZTIffh9Qo8te0EppRpWr5uT8Lx2w2HcbqtGz/75Aw4VQXzRhfi4c/PxeGmTrx9KKpQMmRENfpEdCsRrTf9xCrLlALoA3APgFUAhoc5Fny9VURUTUTVzc3x/YfRO6mupQsHo4zSkdh5sg1TynKNh3zWqAL0ur04drYr4LxHN9bi31/aj65eN/7l2snY+L1l+ExVBQBgbkUBdp86HzBQbKtrRW66A/Xne/D24cQ+FH967xgu++UGS4MmhMAz1Scxe1Q+7l46HqpCeHO/ZjQPNXagpqEDN8weAaeq4AuXjsHXrpyATQebsf34uZC/FY13a1vwjx2nop7n9Qr84H/3IivNgV/dNAtfvmIs7vvULBAhJg/61T31mDQ8BxNKcwAAikJYMHYYdp5sCxhwe90evHO4GbdeNgZPrboUE0pzQgb3WOhxeVDX0oUp5XmoGJaFx74wH43tvXhjX+xS0S9ePWCsQ8TChgON2HSwGQfq/RKG1yvw7PZTmD+mEI/eMg+fX1CBPrcXnb39N/pHWzohBPDZSyqQ5ojdfyzLz8AXFo7Bcx+exm/ePIhvPb0D80YX4rNVFfivTUfw0PrDUf9GbVMHlty7Ebc+sQ2bDzVjclkefv/OUSy9fyOe+GddWKdp16nzOHWuG9fPKseSSSX40qJK/PHdY1hy70Y8uP4QlkwsQWVRFr779124+fdbcbTZb9j/WduCE60XcPey8SjOSYto9I82d+K/36nDp+ePwiWVw4zj80YXAtDWkSK3r3PIHL+oPSWEWCuEuNr085sY/3YzgEcA/BBAOoCWMMeCr/e4EKJKCFFVUmKZDnrAHGnqxLjibBABr+2J/eUx4/UK7DrZhjkVBcaxaSPyAAD7gvTBrXWtmD0qH6+vXoKvXzkBFcOyjN/NHV2AbpfHGHzOdfXhYGMH7rhiLIqy0/D0ICWo/uDyePH420fR0tmH5o7Qh3Hv6XbUNHTg01UVKMhKw4LKYVjnW4x8cecZKAR8dNYI4/zbFo1BUXYaHlrfPxmkx+XB6r/txD0v7Iu6SPZM9UlsO9aKH6ycgmJf1E1WmgOjh2XhYGNknba5oxfbjrXiuhnlAcfnjS5EZ68bh5v8g8be0+3odXuxaHwRAEBVCO4BvIyHGjvgFcBUn5c4p6IAw/PS8d6RkFfAkh6XB4+/fRTfenoHdp9qi3q+xyuMZ+u57f5BdGtdK063dePWy8Zg5cxyzB5VAABwe/rfproWTZ4ZW5zT7+9+ddk4OFXCw2/VYumkEvz5jgX45Y0z8en5o/DbDYejav6/eu0gOnpceOTmedj2w6uw9o4FePmbV2D6iDz8x8v78eA662fvpV1nkKYqWDG9DADw/eumYHZFAcYUZePZr16Gx744H89+dRF+/skZ2HvmPK7/z39iwwHtWX9y6wkMy07DR2aUYXxJDg5HMPo7TrTB7RW4a+n4gOMFWU44VUKTxXum03C+Bzf913v46cv7I/4bDJShjN5ZB+CUEGIvgBsBvB/mWMI40tyJBWOH4ZIxw/rlMZk52tKJjl43ZpuM/oTSHKSpCvabPCqP
V2Dv6fMBg4MZfcTXdf0PfAt7V0wsxk3zR2HDgSZLrbXH5cEtf9iCh9Yfipv++ea+RtSf167VaLHI9Ez1SaQ7FFw/WzPsK6YPx6HGTtS1dOHFXWewaHwxSnL94Y5ZaQ7cvWw83jncgm11sS9Y/nXrCTR39KKjx43a5vAvVFNHD37x6gEsGDvMmDnpTB6ei5oonv4b+xogBLByZqDRnz9G65MPj7cZx/QF1/ljNG/NqSgDMpA19do9TSnXHAQiwqLxxXj/yNmYokCO+9ZIPF6Bu/5nu+XgbOZE6wX0uLzIcCp4fudpYwb3jx2nkJ2mYsU0zeipiqa3D8zod0JVCKMKM/v93dLcDNzzsem44/KxhjSkKIRf3TQL188egfvfPBhWM99z6jzWH2jEnYvH4aOzypHu0Gbc00fk4y9fXohPzx+FRzfVhsw0vV6BV3bXY8mkYuRnOgEAGU4VL3z9cjx39yJU+TxyRSHcsnAM3vzOEowtycada6tx3xs1WHegEZ+aPwrpDhUTh+fgcGNH2HewyyeXFWQ5A44TEUpy0sOuo3i9At/9+070ub249bIxMf5r9o+4GX0iKiCih/TPQojjALYQ0VYAHUKIfVbH4nX9aJzr6sPZrj5MKM3BdTPLcLCxI2DqFiu6kZ5rMuZOVcGkspyASIAjzZ3o6vNg1qgCWDGqMBPFOWkBRj/NoWDWqHx8pqoCbq/Ac9tPh3zvmeqTeLf2LB5afxir/7azX+Gi4fjTe3VI903Pgz2QHpcHL+w8jetmlBkvyjXTNFXu/jcO4kTrBdwwewSCuWXhGJTkpuM36w7GdA89Lg8e23wE44q1OOZw0lBtUyc+89j76HF78YtPzgxYJASAKWW5ONbSFXEx7rW99RhXko1JwwM91NHDslCUnRZw7Q+OncO44mxjUFNVgtsbXt45de4CfvnaAXzrqR0BMtD++nZkOlWMMc32Fo0vwlnfDC8adS2adPiLT87AuQt9+PqTH0ZcWzjYoD2LX1k8Di2dfXj7UDN6XB68tqcBH5lRbkiTTt+egUhtCsexlgsYPSzL+Bv95eaFo3HP9dMCpCFVIfzHDdOR5VTDyjwPrj+E/Ewnbr+8MuR3RIR7rp+GEQWZ+O4zO9Flkq22nziHhvYew3mJRnl+Jv5+1yKsnFGORzYegccr8PkF2v6CiaW5aO9xoznMmowul+Wkh26FKsnLCDtoP/FuHd6tPYsff2waxpX0fwYVCwM2+kKIq4M+twkhVgcdu18IsVAI8bNIxxKB7jmOL83BR2ZoXs5rPm+/tqkTN/9+C96tjT7V3nWqDbnpDowP6pBp5XnYf6bdGPl3+SJ8Zofx9IkIcyoKseOkZmC21bViTkUB0h0qJpTmYEHlMPztgxMBnkSPy4NHNx5B1ZhC/OtHJuOFnWfwhT9sHVS8/97T5/HBsXP4ku8FCg4ne7e2Be09btw0f5RxbFRhFqaPyMMre+qRpiq41vfvaSYzTcXXlo3HlqOtxvQ4En/ZchzNHb34xY0zUZjlxIcWRn/zoWZ88tF30dnrxlNfWWjo8WYml+XBK8Ivsl3oc2PL0VZcO70sZMAgIswdXWgssHu9AtuPt6KqstA4xxlG3jl+tgtf/Z/tWHLvRvxu81G8uOsMqk1tqGlox+SyXCiK/5qLJhQDAN6z2PcQjG70V84sx69vmoVtda24/83wA2pNQweIgK8sGYdh2Wl47sNTWLe/ER29btw4b6RxnkP1efoDkKyOtnShsigr+on9pDA7DXdcMRav7KkPWI8AtPW0t2qasGrJOORmOC2/n5vhxAOfno3jrRfw81cPGMdf2nUG6Q4FV00NWUoMS2aaiv/8/Fx8/7op+ObyCRjrc0r0Z6+20fo56+p1Q1XIcKbMlOamWxr9/Wface/rB3HNtOH4/IKKkN/Hi5TZnKUbgQklOSjPz8ScigK8vrcBb/sMyXtHzuL37xyN+nd2nmzDrIr8gJcX0Iz+2a4+w1PWBwfdc7Vi7ugC
HG3uwum2buw9046FY/0LPp+9pALHzl4IkKGeqT6JhvYefOeaSfjasgl4+PNzsfNkW8SXP5iXd5/Bd/62E8d9i85/eu8YstJU3L10PBQK9fSP+WSF6SPyA47r3v7SySXGDCCYzy8YjcqiLNy5thr3vLA3bJhad58Hj20+ikXji3DpuCLMHV2ID4Mim9btb8Ttf9yGkQWZeP7rlxtySzCTfZp5OIlH30QWzljNG1OAoy1dONfVhyPNnTh3wWVM+wHAoVrLO09uO4E39zfgq0vHY/2aJUhzKEaEkBACNQ0dmFoeuINzZEEmKouy8F4Mzsaxli4U56QjN8OJj88Zic9UjcIT/6zDsZYuy/MPNnSgsigbeRlO3DB7BNbvb8La94+hLC8Dl44r8rfH9xxHmjU0tffg44+8iyOmmbEQAsfPdg1Iz4+FO68Yh9x0R8i60IPrDqEwy4nbFlVG/P7CcUX4yuJxeHLrCVz9m8246oFN+NsHJ3HV1FJL7zsSikL46tLx+O6KycaxiT6jH07X7+r1ICtNDXEsAKAkNz3kPfN6BdY8sxP5WU786sbQGWw8SSmjn+FUMLJA0x9XzizDntPncfufPsDIgkx8Ys4IvFvbEjHkq8flQU19h6VOP81nFHWJZ/ep85g5KnRwMDN3tPZ39GiDBSaj/9FZ5Zg+Ig/f/ttObDzYZHj5CyqHGYuKN8wegSsmFOP9o9E9RZ2nt53EP3acxjUPvo17X6/Bi7vO4KZ5o1CQlYbinPSQqIL6tm5kOBUUBmmTH51ZDodCIZq6mQynihe/eQVuu6wSf9lyHFc9sNlStvnr1uNo6ezF6qsnAQDmjS7AkeaugHj7//7nUYweloXn7l6EUYXhvcvKoiykORRD3gimvVubdocbqIy1lpPn8MEx7V7N0RcOxVre6XV5kZ3uwL9+ZAomlOZiycRivLlP26jW0N6DtgsuTPXp+WYWTSjG1rrWqBFBdS1dGFvsb/f3rp0Mp6rgV6/VWJ5f09CByb40AZ+aPwp9Hi8+OHYOn5g70tDxtfZoJiBSpMire+qx62RbwIytqaMXF/o8AfcUT/KznPjy4rF4Y18j9p4+j44eFx7bfASbDzVj1ZLxMRnu766YhDsuH4tJw3MwpSwP104vw9eWxWdrUEluOvIyHGFnlJ297rD3WJqbjtauvoAd4I0dPahp6MDXlo0f8nQgtjT6mw424cr7N6HVJHvUNnViXHGOYYSvm1GONIeCKyeX4tm7F+H2y8fC5RFGKKLOz1/Zj0c21hoLs26vMCIezOhe3P76dvS6PThQ3x5Wz9eZNaoACgFPbTsBVSHD4ACawfzrnQsxsTQHd63djn99djca2nuw+uqJAV7AJWOH4WhzV8w7UmsaOnDVlFKsmDYcj246gj63F7ct0haMhudloDFogan+fA9G5GeGeB4Th+di+4+uMTz+cORlOPHvN0zHC1+/Aj19HjxlEZW0+VAzppbnGYOe3/C2AdAWl7fWteLjc0YiO8rL7lAVTCzNCevp67ONvDDSwOxRBVAVwvbj51B9rBXFOWkBswKHSpaevtvrNbxmAFgxrQyn27qx70y7fxG3zMLojy9CZ68bu037HqyoO9tlSAuAthB699LxeH1fA7YGDfrdfR4cO9tlzHqmj8gzYsvN0o7eHgDGhiMrNtQ0AQB2nfTf49FmbYZRGWEmO1juuGIs8jOd+MaTH+KyX76FX71Wg8vGFcW8wJnuUHHP9dPw6C3z8cgt8/Dw5+dixsj86F+MASLChNKcgEgvMxf63GGf1dJcLXfV2S7/O3v6nJaeYewQ/nvq2NLoF+eko66lC2/u8xvwI82dARpwxbAsbPvBVfj9rfORk+7ArFH5GFmQiVf3+DfnbDl6Fr9/pw73vXEQX/rjNrzle/jn+Dx0M7kZTowpysL+M+04UN8Bl0dgTkXkBywn3YFJw3Nxoc+DGSPzQx6Sgqw0/PXOhZhclosXd53BgsphuGx8UcA5uiQUy7b+
s529aOnsxWXji/D/bp6Hv965EA98ejYmlGoGoTQ31NM/c74b5QXWCdbys6wNpxUzR+VjWE6apUfb5/YiL8Pf9tkV2mCo6/qv7qmHEMD1s8tDvmvF5LLcsLH67d0+ox/G089MUzGtPA8fHm/DB8dbcUnlsIABz6EocHtFSNSG2yMCEqldNbUUCgFv7mvAAd+sQzfCZi7zSS2RJJ6OHheaO3pDDOydi8ehPD8DP3/1QEAE0OGmDggBw9ATEdZcMwm3X16JSUFJwnRPP9xMo7PXja2+zW76TnQAxp6UoTRSeRlOfOPKCTh1rhtXTS3FC1+/HE+tujTqwJ8oJpbmRvD0PWHvUw8KML9rp3xGP9IsNl7Y0uhPH5GHMUVZeNWnh3f3eXC6rTtk4a8gK814oYkIK2eW4Z+1LTjvMwy/XX8YJbnp+OnHp2NrXSse3XQEIwsyjZE6mGnledhf324s4kbz9AFgrs+rXWBaLAy+x798eSFuWTgaP/n49BCPe+bIAqQ7FGyri74RSjeEuvG5fEJxwAJtaV5GSChZfVsPyvP7H5JnhUMhuCxkBI9XGB4nAGSnOzClLM/Q9V/adQZTy/OMwSkaU8py0dTRa7nA3d6jyTvhPH1Ak5e2Hz+Hk63dAXq+3gb9ns24vQJOk6dflJOOSyqH4Y19jThQ34GRBZmWklJRTjqmludFXMzVwzWD14cy01T8y7WTsfvUebywyx/pVRPUzwCwYnoZ/u366SF/W/93Dyfv/PNwC/o8Xlw9tRSn27qNBci6li6kORSMiNOzEY47F4/F3p9ci99+bm7YoIhkMXF4Dlo6+yyfs65eN3LSrXcol+pG36Tr64nYdPl5KLGl0SciXDejHO/VtqDtgrYgJwQsoz3MrJxZDpdHYN3+Rmyra8X7R8/iriXj8MXLKvG/dy/C+JJsXD21NOz3p5Xnoa6lC+8daUFxTjrKY0hBrOv6l1RaL0wCmkf980/OtNSE0xwK5lQUGHH+kdBDA608TkB7GM929RmLei6PF40dPRgRp1TKDkWBx1IaEVCVwEdx3pgC7DzRhhNnL+DDE20xe/mAFsEDWC/m+j398N7ivDGF6PP9G1wSNBg7jBDHYE8/NGXytdO10OB3DjeHLOKaWTS+CNXHz4UNMz3aEl5K+cSckZgxMg+/XX/Y8PYPNnQgw6nElMbXqcs7YYz+WzWNyM1w4I4rxgLwR6XV+SJ3Iq1ZxQMiQoYztvQOiWa8HsFjEfrd1etGdloYeSdPM/rmCJ5T5y6gKDst5lQWg8GWRh/QFhrdXoE39zcaUQfBYZbBzKkoMCSe3244hOKcdNyyUNMPZ4zMx4bvLsO/3xDqLenoO3M3HGjCnIr8mFbgPzarHD9cORVXTgk/mERjwdhh2HfmfNSt9AcbOlCY5URJmIWi4XkZEALG+kBjew+EAMrj5H2E283q9noDvGRA0/W7+jx40Be98bGZscVWA/5drzUWi7n6LC5cuJ9+bQDI8kk9ZsJFu7iCZiuAtokNANouuCz1fJ3LJxShz+3FljAL8nqETqWFEdcjS46dvYD1voXWmoZ2TBqeG7BgGw7VWMgNlXe8XoG3apqxdFIJ5lRoax27fLuBNaOf/NzwycSI4LEI2+zsDa/pF2Xrnr5/Vn3qXPeANrkNBNsa/Rkj8zCqUDPgtU2dUAiojBJpoM0QyrDpYBPerdW8/OCRN5Ih142+2ytiknYAbffqV5aMG/AGF0CbJXgFAmLbH91Ui7XvHws4r6ahA5PLcsO2oTRIa9R36cYyY4kFh0qWxsXtESEGSje8/9hxGrMrCjC6H/HgJbnpKMxyWur67T0u5KY7IhrEUYWZKM1Nx7zRhSHeezg5xO3xwhk0WxlVmIUZI7VnwmqWpqPvaH504xHLHZ51LV0YkZ8R1uP9yPQyjCzIxB/+WQdAG9wnx1jgwz+IhV53z+nzaOnsxdVThyMrTVt/0nMTnTh7AWNLUtvoj8jPRKZTtdT1u3rdyA4j76Q5
FAzLTguRd0ay0R8cRISPzizHu7Ut2H78HMYUZRvbtSOxclY5vAIoyk7DLZf2r7pPWV4GhmWnAQi/KWsomDemEAr5UznsPX0e971xEA9v8E/5vV6BQ40dET1Ofdqpp2I4E2edMZynH6zpA8CYIm13LABcPyt2aQfQ+n5ymXU6hvZud9hFXPP3H/vifPzk41YauPbKBBtJq4EL0Awy4HcIrMhwqvjm8gnYdqwVmy2yL9a1dEWMknGoCm6/vBLb6lqx8WATWjr7wkp4wRg7ci2M/oaaJigELJ2k5cCaU5GPXSfbcKatG30eL8amuKevKOEjeLr6wi/kAoEbtIQQOH2uOyF6PmBjow9oaXNdHoH3jpyNKu3ozBlVgGWTS/B/rpuCrDCaXDiIyJADZsUpNCwWctIdmD4iH9vqWiGEwE9f3u+Tafqw94wWZne6rRsX+jwRjcHwPM2j1z0Qw9OP08MYLm+Nx0LT13fHAtqehf4ypSxPS3IWNMi097iQmxG9X+eNLrR8ZsIt5Lq8wtDHzXz5inF44ktVUaNcPnfJaIwqzMR9bxwMuedjQeGaVnz2kgrkpDvw4+e1lNaRBnczRu4dixnYWzWNmD+mEIW6IzOqAO09bmw8qEWxJSK88GJnYmlOiKfv8njR5/YiJ4L9MG/QaunsQ6/bm5DIHcDmRn+2LwwTiL6Iq6MohD/dviDipqNIrJxZjmunDzdelERxSaWWFvjl3fXYWteKNddMAhGwsUbzHHWvNzhkz0xRdhrItCu3vq0buRmOfu9gDIeqkGWUiMtC0weAu5eNw79dP21A0UOTy7RQWD0UTqe92xXV049EOE3f47WufZuZpmL5lOjb/tMcClZfPQn7zrTjdVOo8bmuPrRdcEU1sLkZTnzukgqjvbF7+tYJ1xrO92Dv6faAe9dDlf+xQ4sUYqOvLa7Xn+8J2Gil5/uJ5OmX5Kaj2TejTmTkDmBzo69r9AAwPkH6480LR+N3X6xKyLXMLBhbiF63F99/bjcmDc/B15aNx5yKArzl88oORogV13Goim9Xrk/e8W3MiheOMMnKPGGkkfljhuH2y8cO6Fp6MrUjQZEV7T3uiOGa0XCGid5xeUTA5qyB8Mm5IzGhNAcPvHnQiJuv60c8/Jcur4SqEIqy0wKynkbCESbhmr4Ibs47NLE0F1lpKnacaEN2mhrzNeyMvlNdDxAAIidb0ynNzUBzZy+EEDh1TgvJZU0/TnyqahSG56VHDIm0A3o8eVefBz/86DQ4VAXLJ5di96k2tHT24mBjJ0YVZkb12ktN084zbeE3Zg2E8NE7oZr+YNH3UgRnQdQ8/YHPXFRD3gk0km6Pd1CL8frf/t6KSTjS3GWU9Kzrx87XUYVZuH1RZcxZJAH/zMVqEAOADNM6mKqQsaO1sjh7SPPDyII+azTnlerq1UJvo3n6Lo9A2wWXsRuXjX6cmFKWh60/uHpIt4tfDBTnpGPmyHxcPXW4sfB25ZRSCAG8fagZBxvaY4roGJ6XYSzk1p+P38YswLebNaymH18Doi+otwZtnGnvdg3S07eOdonXwHXt9DIsmVSCX7x6ALVNHTh2tguqQqiIUe/90cemRQwrDsYw+iEL09qgFtwmPe+U3d+nWNGNvpWnnxUmegcI3KB16lw38jIcg3ou+4PtjX4q8cxdl+HRW+YZn6eV56EkNx1v7GvA0eaumHRe3dPvcXnQ2tUXt41ZgGZgLDV9j9dIBxAvstJUZDiVgBq0Hq9AR687bLK1WFAV62iXeMg7gCZJ3v+pWchKc+CbT+1ETUMHRhVm9qscYX/wRyOF7jsAEDJ70fNORcoem0rkWxh9vd5wZHnHv0FLC9dMzCIuwEbfVmSmqQHGQVEIV04uwbr9jXB7RcxGv6Wz11gQjFfkDhC+AInHGx+DaYaIUJSt7TDW6dRTMAxmIVe1jnZxx3HgKs3LwH2fmoUD9e1Yt79xSDdBhU0r4RsEgiOSqioLkeFUwlaESzV0o9/ebZZ3
fAu5UaJ3AG2D1ukEbswC2Ojbnisnl0J/n2MJ4yv17crd68v6OCKOmr4jgqavxlnTBzSJxyzv+DNsDlzT1zdgBbfDaq/BYLhq6nB88VJtN/hQRsmEK6Kiz2SCI5KG52Vg+4+uwfJB7CC3E7okEyjvaJp+RE/fFB596tyFhEXuAMDFka6OGTIun1hseHOxGA992qlnVIxr9E4ETT/enj4AFOWk4Wyn3+ifj5JhMxbC1ZR1eQe/kBvMDz86FR09rpBavvFEH8SC5R0995BVKO3FkuXyYiCipx9B089JdyArTcXhRq2saiI9fe49m5OX4cRl44vQdsEVky6sb9DSjX7ZEGv6QghtETTOmj6gefrmvCjRcunHgjOsvBP/gSvDqeKhz82N698MRlEIROHlHau9B4yfNIeCTKdquZAbbXAszU03yqWy0WfiyoOfnROxHJ4ZPRXD/jPtKMpOi2uGQ03TD5VFAAyNp5+dhrNdWiw0ERlVs+IRsmm5kCupgXQqimU0EhAavcOEkpfpCDD6kerjminJTTeqs40suIgXconIQUTfJqLQJCGh5z5MRJuJ6AnTsQYi2uT7md/f6zP9R0vzHJsnUZyTDiJteh/PGH3AutSgblyGQtMvyklHj8uLC32axqpPwQcTvRNuc1Zw5SyZ0KqBBUXv+AaBNEkHskSSn+kMMfrZYerjmjHX5bjYF3IdALYC2B3pJCLKBvCsEGIpgEYiWuz71etCiGW+n+0DuD4zhDhVxUh0Fs8YfcA6n/5QevrBsfqGvBOP6B2PhbwjqVdstWnOkHckHcgSSX6m05hFAtoGyVhSl+gRPFlpKgr6UYVusPTb6AsheoQQWwCEL6qpndclhHjb97EVgB4wvYSI3iGiB4i39F2UlPg8kHjG6AN6GoYwUSJDoOnrg5cettne7QIRIibCioaRe8fC04/3Qm6icKpKyAxMb1+8N83ZEUtPvx9Gf2RBaA3qoSTqU0pEtxLRetPPmv5cgIgmApgnhNjmO7RaCLEY2oxhpcX5q4iomoiqm5ujKkjMEDDcp+uPiHMYmVXCNd3YDIWXXOQrFqNv0GrvcSM33TGoak+OMEVHhmIhN1E4lNBi7y6PF06VONVCDORlOEMWcmMx+nqkXCKlHSCGhVwhxFoAawfyx4loBIB7Adxm+nsv+v73FQBzfP81X+9xAI8DQFVVVcTZBDM06A9jPDdmAXqN3ODslEPnUVp5+oORdgD/4GRe+DQikCT19K32T8Rzs5ndyct0hoRsxiLv6LH6icq5ozPUvfoAgLuEEO0AQETDiGi573dVAGqH+PrMANDDNuMt76gKQQgE5It3J1jTH2x+E7+nH9oGq5h2GXCoiuVCrqxrFIkmP9OJjl638Ux09XqQFUOtW71saSIjd4A4Gn0iKiCih0yfFwBYBuAZX6TODQDaAdxMRJsBTAXwQryuz8SPisIsKARUDIvvw2gV+eL39OPvfwTn39GqZg0uStlqIVeXRoYiAikROFSyXKPgyJ3Y0KPBOnyBAp0xevrjSrKxZFKJkSAxUQz4DRBCXB30uQ3AatPnbQCsthLeOdBrMonhE3NHYmp5nuHxxwvVIs+LK0yOl3gQnH/nfLcrap3kaFjVlNXXJYJr5MqCQ6GQqCqZo5ESjTnTZkFWGi70xabpZzhVrL1jwVDfXghyPqXMkJLmUDBzVPzLPfpzt/u95KHU9IHA/DtxkXdUC3nHyFMjp5F0KBbRO56h2SVtR4IzbXb1Rq6Pm2y4V5mEYbWbdSg1fSAw/05cFnKNkE2/kXR55U5Z4FQpZEeuHr3DRMds9PvcXvR5vMiJkHcn2cj5lDJS4kiwpg/4PX23x4uuPk8cFnItBi6P3Au54UJpZR3EEo0/6Zo7pvq4yYZ7lUkYVrnbXWEqNMULPf9Oe8/g8+4AptmKpbwj5+vkUJXQIioS7ztINPozdb7bFXOytWQi51PKSIkaQdMfOnlHy7/TcF4rATlYT5+I4AzKVWPIO5Ia
SaflTmnvkFXrshtmeaerL3oBlWTDvcokDEtpJAELuQBw7KxWYHwwydZ0guUQY+CSVANXFcUigRx7+rGS6VThVEkz+kZRdNb0GSaipj9UkSL6rty6Fs3oD3YhFwhNRWxIVJJGuziV0CybfW7W9GOFiIz8O7qmH0ucfrLgXmUSRlI0fd+ux6PNutEf/MsYXOvXWMiV1NPXUiuHevqyticZ5GU40d7j4oVchjGTFE0/SN4ZrKYP6HHtoZuzZPWMreL0OfdO/9Dz73Syp88wfpKq6cdT3gleyJU8ZNMq5bXLw55+fwiWd9jTZxgkR9M38u909UFVCNkxJMKKhhqUitjIvSOr0bcoWC9zfYBkYBh9X5W2WBKuJQvuVSZhJEPT1/PvAEBehiMu+eG1oiN2kncsylhKXPM3Geh1crt63XDEUB83mVy8d8bYjmRo+oBf4omHtAOEGkk7LuT2ebzSylXJIN+k6Wenx8e5GCrY6DMJIxmaPqDl3wHis4gLWMg7XslDNi125HKWzf6Rn+mEVwBN7b0X9SIuwEafSSBWqZWHWtMHzJ5+fF7GYHnHJbmnz7l3Bo++6e/M+e6LemMWwEafSSBWRVTcQ6zpA/6wzXh6+mbPWPf0pV3ItSii4vIIlnf6gf5snWnrvqgjdwA2+kwC8Xv6ZoM59Jq+vkErXkbfqVLQYrTu6cv5OjmV0HKJbg9H7/QH3dNv6ey7qPPuAGz0mQRiVXVqqIuoAPGXd4JDHOXPvUPwBtUudklc6D0ZmIMEWN5hGB9Wmr47AZq+Lu/EI9kaoMsh5ugd2RdyQ9NFcxGV/pEfYPTZ02cYAEnU9HV5J44hm9byjpxG0r9pTusLj1dACHkHsWRgfrY4eodhfETS9NUhjGsePSwLWWkqJpTkxOXvaUVH7LU5C/D3xVBvmLMjuekO6I+w7Tx9InIQ0beJqDmGcxuIaJPvZ77v2Boi2kJEawZyw4y8hNP0FQKUIdb0d//bCiyaUByXv6d5+qG5d2TNPx+8f0I3/mmSDmLJQFHICBSwo6fvALAVwO4Yzn1dCLHM97OdiCoAjBFCXApgORFNGMD1GUkJp+knQkaIpxfuUBXL3DvSGn1d3vF5+ImQ3OyIruvHI7/TUNLvN0EI0SOE2AJARD0ZWEJE7xDRA6TtS14B4Gmf118KYHHwF4hoFRFVE1F1c3PUyQQjEeESrslmXBxK4EKuR/Y4/RB5R+6av8lCjw7Lkt3TJ6JbiWi96ac/ssxqIcRiaLODldAMfR+AewCsAjA8+AtCiMeFEFVCiKqSkpJ+XIq52HFYaPouj1c6Y+lQCB5PYHijU6WLOt9KJPyefqCmz5uz+ofu6V/s8k7UuxNCrAWwdiB/XAjxou9/XwEwB0AzgEcA3AkgG0DLQP4uIydqGE1fNlnEoSoBO1hlLziiRx3psxc3e/oDwpB3LnKjP2S9SkTDiGi572MVgFoA6wCcEkLsBXAjgPeH6vrMxYdVamW3V0CVzGA6lNAiKrJJVGaC11p04y9rCGqy8Hv6NtP0w0FEBUT0kOlQO4CbiWgzgKkAXhBCHAewhYi2AugQQuyL1/WZix81SDsGAI+EFZqCK025vV7pZitm9FmKyxPo6XMahv6hR+9c7J7+gO9OCHF10Oc2AKtNn93QZJzg790P4P6BXpeRFyIKDXf0yqfpO9XQNAwySyHGjtwgTV/mgSwZ5BnROxe30Zf3SWWkJDgXvYyafnAqYtkzUgbPwIyFXIkHsmQwY2Q+KoZloti3A/xi5eIekhjboVWdCtb05TKYTiU0947cnn5QnL7kCeSSxdJJJXjnX5dHPzHJyPukMlIS7CVrmr5cj6GqKBDCvPAp90Ju2DQMki2wM7HBvcokFK3qVGABEtk8fYeRldK/g9UpsYF0BGXZlL3mLxMZeZ9URkqCPX23hJp+8MKn2yOfRGVG9+j98g5r+naGe5VJKA6FQjZnyWYwVSVwB6vbK1/YqRnd03d5gtMwyNsmJjxs9JmE
ogaVGnR7EpNwLZ44g+UdyYuI6//+Ho7eSQm4V5mE4lSU0I1NknmUhhxiSlAmm0RlJnSNQu6soUxk2OgzCUUN2pwlY8hmcLSL7EXEncaOXPb0UwHuVSahqBaavmwepeEZm+LaZZutmFHVwOynHKdvb9joMwnFYaHpy5ZwLThbqOzyjjOoPW729G0N9yqTUBwWmr5skS+6MdQHL49X7tTKwZWzjELvEreJCQ/3KpNQghOuyazpm7NSyiyFhMu9I3ObmPCw0WcSiq00fVP+eZmlEGfwjlzW9G2NvE8qIyV20PT9ce0mT1+ygctM8I5cf7lEufqFiQ3uVSahBGv6Hgl3szqsFnIl9vRDQ1AFFAIUiQcyJjzyPqmMlIRq+jImXAtcyJW9cpaiEBQyFVGRXK5iIsM9yySU4CIqMiZc8+eq0QYvj+QLuYBe7N0vV7HRty/cs0xCCa4v65FS0w8qL2gDz9ihEDymHbmyD2JMeOR+UhnpUBUlJLWyfJp+YO4d2RdygcCKZi4Jk+AxsdPvniUiBxF9m4iao5w3mog2+X5OEtEnfMcbTMfnD/C+GUlxKiR9ERVzlk0hhC8Ng9xG0qkqpn0H8m2YY2JnIDVyHQC2Atgd6SQhxAkAywCAiF4D8IbvV68LIb40gOsyNsAOmr5qknd071jmwuhAYHEb2XMJMZHpt3sihOgRQmwBIKKeDICIxgE4LYTo9h1aQkTvENEDRBTyZBHRKiKqJqLq5uaIkwlGQsyavtcrIASk0/SNQuJeYQxgquRGUvP0/Zq+7GsUTHii9iwR3UpE600/a/p5jc8CeNr0ebUQYjG0GcPK4JOFEI8LIaqEEFUlJSX9vBRzsRPsUQLy7fz0e/pef2lByQauYLTB2BS9I3l7mPBElXeEEGsBrB3ENVYAuM/09170/e8rAOb4/sukCA5FCanFKpu8Y07D4LZJaUE1YCGXo3fszJAO5z5pp0EI4fZ9HkZEy32/rgJQO5TXZy4+zFEi+n+lW8g1pS3QY9ulX8g1DcYuGyxMM+GJW88SUQERPRR0+OMAXjJ9bgdwMxFtBjAVwAvxuj4jB6pJ0/dIWpZPtfD0ZV/Idaj+BXa3xyt9e5jwDCR6BwAghLg66HMbgNVBxx4M+uwGcOdAr8nIj8NS05fLq3QqoQu5srUhmIAZGO/ItTXcs0xCcfg2Z2nx7ZJr+mZ5R7I2BONQFaM/XBIWq2dih40+k1DMGR2NcEfJDKY5y6ZH0gikYBymOgccsmlvuGeZhOIvwi2vwSQiI/TUqDIleYijuc6BHdJKMOGR+0llpCPA09eNvoQGU1UILq/Xv5Ar2cAVjDmUlj19e8M9yyQUo+qUR15NH9CidQLaILmRdKp+eYfTMNgbuZ9URjqMXPQmL1k2TR/QFz6FYShlHLjMBOyU5ugdW8M9yyQU3cDLrOkD+sKn198GyY2+uYiKi7Ns2ho2+kxCsYumry98Ggu5knvGTlP2U5fHK2WfMLHBPcskFNWk6cvsJTsULSulXRZyzcVt3DYo/8iEh40+k1CcAZq+5iXLqelrWSn9i9Fyv0raQq5/cxZr+vaFe5ZJKGZNX9bUyoA/bYHLJp6+uc4Bx+nbGzb6TEIxFxX3GFk25XsM9bh23dOXcbZiRm+PXv6RPX37wj3LJBRD0w9YyJXPYOoLuX5NX+5XSZ+5GOUfJZ+5MOGR+0llpMNfgMSv6Usp7/jKC8osUZlxqArcHvtEIzHh4Z5lEop1yKZ8BlPzjE0Dl4QSlRm9PXbZbMaER+4nlZEO1TaaPvk8Y3vIIQ6V4BVAn9tX85c9fdvCPcskFIdNNH2nLw2DfXLvaPff4/IAkF+uYsIj95PKSIddNH1VIa2Iik3kEH0Gpht99vTtC/csk1DMIZuyFkYHNDnH7ZV7V7EZ/f67DaMvd3uY8LDRZxKKalrI9Uice0f1afoy7yo2o3v23X0+eUfCPmFiY0A9S0QPE9FmInoiynlriGgLEa2JdIxJHSw1fQm9
Sr2mrMsr4FQJRPK1wYzKnn7K0G+jT0TZAJ4VQiwF0EhEi8OcVwFgjBDiUgDLiWiC1bHB3DwjH5aavoReslPfzGSTjJS6ke9x2SMElQmPo79fEEJ0AXjb97EVQG+YU1cAeJqI5gMoBbAY2iATfKzW/CUiWgVgFQCMHj26v7fHXOTYRdNXFcUI2ZRxphKMbuQ5esf+RB3OiehWIlpv+lnjOz4RwDwhxLYwXy0F0AfgHmhGfHiYYwEIIR4XQlQJIapKSkoG1Cjm4sWyiIqEXqXTlGVTxplKMLqR1+WdNI7esS1RPX0hxFoAa83HiGgEgHsB3Bbhq80AHgFwJ4BsAC2+41bHmBRBN/D6jlwiOT19h0rGBjPZY/QBf78YC7k2aBNjzUB79gEAdwkh2iOcsw7AKSHEXgA3Ang/zDEmhfB7+l6fHi6fwQf0IipanL5T0jaYCfb0Wd6xLwNZyF0AYBmAZ4hoExHd4DteQEQP6ecJIY4D2EJEWwF0CCH2WR2LRyMYeTCKqPi8ZBm9fEBbm/DoC7k28IodwZuzJJTcmNgYyELuNgDlFsfbAKwOOnY/gPujHWNSh+AiKjLq+QCgqgSXV8DltclCLqdhSBnkfOMYaTFr+h6JDabTV1PW7fHawit2cpx+yiD/08pIRYCmL3Hki15ExeWRV6IyY2zO6uMsm3aHe5ZJKLqRd3m0qlOyGkyzBm4HrzhU3mHTYFe4Z5mEoigEheTX9M1G0g4G0hkUvWOHiCTGGvmfVkY6HIoivabv9/TllajM+OUd9vTtDvcsk3BUhXyavl3kHflfI6OIipujd+yO/E8rIx0OlXyavrxesu4Jd7s8tjCQjiBPn9Mw2BfuWSbhGBubvELK+rhAoKcv68BlJiThmg3axFgj5xvHSI1q0vRljXzxL+TaI7VycBoGWWU3JjryP62MdDhsoOkb+efdNpF3VL+8Y4eiMEx42OgzCUfPUCmzpq8PVkLYYyOTX96xx8yFCQ/3LpNwHHrVKYk9fbNhlHXgMqN7+n0ery1mLkx42OgzCUf1LeRqmr6cj6DZ0Nshpt2cP4gjd+wN9y6TcLTNWXJr+mZv2A6evhowiMnfHiY8bPSZhKMq8mv65hmKHYykM2AQY7NgZ7h3mYSj1ZeVu4iK+b5llajMEJHRJlnDaJnYkP9pZaRDNW3OktWrdNpM3gH8A5kd1iiY8HDvMglH1/TlTrhmlnfs8RrpmTXtMogx1tjjaWWkQtf0XR6vPeQdSdsQjD54pTnYLNgZ7l0m4ThMmr6sXqVZx1clna0E42BPPyUYkNEnooeJaDMRPdHf84iogYg2+X7mD+T6jNzYIeFaoKcvZxuC0aU2u8hVjDX97l0iygbwrBBiKYBGIlrcz/NeF0Is8/1sH/CdM9Jih4RrAQu5krYhGH2dQtY+YWKj30ZfCNElhHjb97EVQG8/z1tCRO8Q0QPEWZ1SEj3hmsyavkO130Ku4enbZObCWBO1d4noViJab/pZ4zs+EcA8IcS2KN8PPm+1EGIxAAeAlRbnryKiaiKqbm5u7neDmIsfPeGazJq+w44LuRynnxI4op0ghFgLYK35GBGNAHAvgNsifdfqPCHEi77/fQXAHN9/zdd7HMDjAFBVVSWi3R8jH4EJ1+T0Ks1GX9bZSjD64rQdNpsx4Rlo7z4A4C4hRHt/ziOiYUS03Pe7KgC1A7w+IzGqopgSrslpMM2Sjl2MJG/OSg0GspC7AMAyAM/4InBu8B0vIKKHopzXDuBmItoMYCqAFwbdAkY6HArB5fFKnYYhMMumnG0IRjf2dpGrGGuiyjvB+LT5covjbQBWRzsPwJ39vSZjLxwqodft1f5fUgPjsGGCMmNHrk0GMcYaezytjFQ4FDIKcMur6ZvlHXsYSZZ3UgPuXSbhqIpiePqyGkxVIegBx3Yxkk6Wd1ICezytjFSY5QNZNX3AvxNXVokqGL1f7LIwzVjDvcsknIAqTRIbTNVmuWocLO+kBNy7TMJxBsS4
y/sI2i1XDadhSA3s8bQyUqEG5KKX18D4NzPJ2wYznIYhNeDeZRKOXYqK++Ude7xGDg7ZTAns8bQyUqHaJIWB02a5ahw2m7kw1rDRZxJOwG5Wib1kvXiKzAOXGSdH76QE3LtMwrFLCgOnsfBpj9eIN2elBty7TMJRzbnoJfaS/dE78rbBjBG9I3GfMNFho88kHLukJVaNzVn2eI2cNgtBZazh3mUSjl00fb8GLu/AZUblOP2UQN43jpGWgJBNiQ2M3XawOjlOPyXg3mUSTsDmLInlHYfdcu+wp58SsNFnEo5dNH3/DlZ522CGE66lBty7TMKxi6avD1gyD1xmeEduaiDvG8dIi100faeqwKkSiORtgxl9bULmgZiJDvcuk3Dso+mTrQykw2ZpJRhr7PPEMtJgJ01f5plKMKzppwYD6l0iepiINhPRE1HOayCiTb6f+b5ja4hoCxGtGci1GflRbaLpOxRF6plKMEYlMBsNZEwo/X7jiCgbwLNCiKUAGolocYTTXxdCLPP9bCeiCgBjhBCXAlhORBMGeN+MxJjlA1ViA6N5+vIOWsGoCnv6qUC/e1cI0SWEeNv3sRVAb4TTlxDRO0T0AGmrXSsAPO3z+ksBRBowGJti1vRlzvMypSwX00fkJfs24sa4kmwU56ShNDc92bfCDCFRjT4R3UpE600/a3zHJwKYJ4TYFuHrq4UQiwE4AKyEZuj7ANwDYBWA4RbXW0VE1URU3dzcPIAmMRc7dtH0Vy0Zjz/dviDZtxE35o4uRPWPrkFBVlqyb4UZQhzRThBCrAWw1nyMiEYAuBfAbVG++6Lvf18BMAdAM4BHANwJIBtAi8V3HgfwOABUVVWJaPfHyIddNH2GkZGBvnEPALhLCNEe7gQiGkZEy30fqwDUAlgH4JQQYi+AGwG8P8DrMxJjF02fYWRkIAu5CwAsA/CMLyrnBt/xAiJ6yHRqO4CbiWgzgKkAXhBCHAewhYi2AugQQuwbbAMY+bBLnD7DyEhUeScYn4ZfbnG8DcBq02c3NBkn+Lz7Adzf3+sy9iEwDQMbfYZJJCyoMgnHLoXRGUZG2OgzCcdhKihul7w1DCMLbPSZhGO3PPQMIxNs9JmEo0s6bPQZJvGw0WcSjsNmeegZRibY6DMJx6g4xTleGCbh8FvHJBzW9BkmebDRZxKObuvZ6DNM4mGjzyQcIoJDIU7BwDBJgI0+kxRUm5UaZBhZ4LeOSQpO1V5VpxhGFtjoM0lBVYhDNhkmCbDRZ5KCQ7FXUXGGkQU2+kxS0Dx9fvwYJtHwW8ckBaeqSF0fl2FkhY0+kxRY02eY5MBGn0kKrOkzTHJgo88kBdb0GSY58FvHJAUHx+kzTFLod41chokHX1s2HnmZzmTfBsOkHAMy+kT0MIDZAI4IIe4Ic85oAGt9H8cD+KYQ4nkiagBQ4zv+XSHE9oHcAyM3188ekexbYJiUpN/yDhFlA3hWCLEUQCMRLbY6TwhxQgixTAixDMBeAG/4fvW6fpwNPsMwTGLpt9EXQnQJId72fWwF0BvpfCIaB+C0EKLbd2gJEb1DRA8QV8VmGIZJKFGNPhHdSkTrTT9rfMcnApgnhNgW5U98FsDTps+rhRCLoUlLKy2ut4qIqomourm5OfaWMAzDMFEhIUT/v0Q0AsAjAG4TQrRHOXcjgGuEEO6g4ysAzBFC3Bvuu1VVVaK6urrf98cwDJPKENF2IUSV1e8GGrL5AIC7YjD44wA06AafiIYR0XLfr6sA1A7w+gzDMMwAGMhC7gIAywA8Q0SbiOgG3/ECInoo6PSPA3jJ9LkdwM1EtBnAVAAvDOSmGYZhmIExIHknUbC8wzAM03+GQt5hGIZhJOSi9vSJqBnA8QF+vRhASxxvRxZSsd2p2GYgNdudim0G+t/uMUKIEqtfXNRGfzAQUXW46Y2dScV2p2KbgdRsdyq2GYhvu1neYRiGSSHY6DMMw6QQdjb6jyf7BpJEKrY7FdsMpGa7U7HNQBzbbVtN
n2EYhgnFzp4+wzAMEwQbfYZhmBTClkafiNYQ0RY9I6jdIaKHiWgzET3h+5wS7Sei2UT0mu//U6XNX/D19YtElJkK7SaiHxDR20T0HBE5iOizRLSViO5L9r3FE1/bvu3bn6QfC+nfwfa57Yw+EVVA25hwKYDlRDQh2fc0lIQpamP79hORAuDLAJyp0udElAHgegBXArgR2oYdW7ebiNIAXCqEWALgEIAFAK4XQiwEkGNK4GgHHAC2AtgNWNuyeDzrtjP6AFYAeJqI5gMoBWBZ2csuWBS1WYTUaP9XAPzR9/+p0ucLAXRDq0J3D1Kg3UKIPgBpRDQHwCQA5QBe8ZVjnQJgaRJvL64IIXqEEFsA6NE1Vv076D63o9EvBdAH7aVYBWB4cm8nMehFbaD1qa3bT0RlAEYKIXb4DqVKn5cDyAZwLYCxSJ12vwPgXgBNANIAnAfwGwC3wb5tBqz7d9B9bkej3wytwMsPAaQjBfJ0+Ira3AvgLqRG+28CcA0RbQIwH0An7N9mAOgCsFkI4QWwGYAXNm83EV0CIE8IsQLAUQAeAD8H8DtopVpt12YTVu/yoN9vOxr9dQBOCSH2QtM930/y/SQCc1Eb27dfCPGIEOIyIcQyANsBvAibt9nHdmjTewCYDmAL7N/uMgB61b2zAEoAnAOwHlqb30vSfSUCq3d50O+37Yy+EOI4gC1EtBVAhxBiX7LvaSgJLmoDYDZSqP1A6vS5EOIMgA+I6D0ATiHEZti/3a8BqPQVXroBwJ8A/B7ABwCm+X5vS6ye63g867wjl2EYJoWwnafPMAzDhIeNPsMwTArBRp9hGCaFYKPPMAyTQrDRZxiGSSHY6DMMw6QQbPQZhmFSiP8PgrkiCTjeWLEAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ "
where \(y_1\) and \(y_2\) have minimums at \(x_1 = x_2 = \cdots x_N = 1/\sqrt{N}\) and \(x_1 = x_2 = \cdots x_N = -1/\sqrt{N}\), respectively, both of which are 0. Also, the upper bound is 1.
+
Since PHYSBO solves a maximization problem, the objective function is again multiplied by -1.
+
+
Reference
+
+
Van Veldhuizen, David A. Multiobjective evolutionary algorithms: classifications, analyses, and new innovations. No. AFIT/DS/ENG/99-01. AIR FORCE INST OF TECH WRIGHT-PATTERSONAFB OH SCHOOL OF ENGINEERING, 1999.
Let’s plot each of the two objective functions. The first objective function has a peak in the upper right corner, and the second objective function has a trade-off with a peak in the lower left corner (The star is the position of the peak.).
As with the usual usage of physbo.search.discrete.policy (with one objective function), optimization is done by calling the random_search or bayes_search methods. The basic API and usage are roughly the same as discrete.policy.
A solution that is not a Pareto solution, i.e., a solution \(y\) for which there exists a solution \(y'\) that is better than itself, is called an inferior solution (\(\exists y' \, y \prec y'\)). The volume of the inferior solution region, which is the space occupied by inferior solutions in the solution space (a subspace of the solution space), is one of the indicators of the results of multi-objective optimization. The larger this value is, the more good Pareto solutions are
+obtained.res_random.pareto.volume_in_dominance(ref_min,ref_max) calculates the volume of the inferior solution region in the hyper-rectangle specified by ref_min and ref_max.
For bayes_search in the multi-objective case, score can be selected from the following method
+
+
HVPI (HyperVolume-based Probability of Improvement)
+
EHVI (Expected Hyper-Volume Improvement)
+
TS (Thompson Sampling)
+
+
The following 50 evaluations (10 random searches + 40 Bayesian optimizations) will be performed with different scores.
+
+
HVPI (HyperVolume-based Probability of Improvement)
+
The improvement probability of a non-dominated region in a multi-dimensional objective function space is obtained as a score.
+
+
Reference
+
+
Couckuyt, Ivo, Dirk Deschrijver, and Tom Dhaene. “Fast calculation of multiobjective probability of improvement and expected improvement criteria for Pareto optimization.” Journal of Global Optimization 60.3 (2014): 575-594.
The expected improvement of the non-dominated region in the multi-dimensional objective function space is obtained as score.
+
+
Reference
+
+
Couckuyt, Ivo, Dirk Deschrijver, and Tom Dhaene. “Fast calculation of multiobjective probability of improvement and expected improvement criteria for Pareto optimization.” Journal of Global Optimization 60.3 (2014): 575-594.
In Thompson Sampling for the single objective case, at each candidate (test_X), sampling is performed from the posterior distribution of the objective function, and the candidate with the largest value is recommended as the next search point. In the multi-objective case, one candidate is randomly selected as the next search point from among the candidates with the maximum value based on the Pareto rule for the sampled values, i.e., the Pareto-optimal candidates.
+
+
Reference
+
+
Yahyaa, Saba Q., and Bernard Manderick. “Thompson sampling for multi-objective multi-armed bandits problem.” Proc. Eur. Symp. Artif. Neural Netw., Comput. Intell. Mach. Learn.. 2015.
"
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "# plot objective 2\n",
+ "plt.figure()\n",
+ "plt.imshow(simu.t[:,1].reshape((101,101)), vmin=-1.0, vmax=0.0, origin=\"lower\", extent=[-2.0, 2.0, -2.0, 2.0])\n",
+ "plt.title(\"objective 2\")\n",
+ "plt.colorbar()\n",
+ "plt.plot([-1.0/np.sqrt(2.0)], [-1.0/np.sqrt(2.0)], '*')\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Performing optimizations.\n",
+ "\n",
+ "## Setting policy\n",
+ "\n",
+ "Use `physbo.search.discrete_multi.policy` for multi-objective optimization. \n",
+ "Specify the number of objective functions in `num_objectives`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-01-05T06:06:14.163097Z",
+ "start_time": "2021-01-05T06:06:14.159742Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "policy = physbo.search.discrete_multi.policy(test_X=test_X, num_objectives=2)\n",
+ "policy.set_seed(0)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As with the usual usage of `physbo.search.discrete.policy` (with one objective function), optimization is done by calling the `random_search` or `bayes_search` methods. The basic API and usage are roughly the same as `discrete.policy`."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Random search"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-01-05T06:06:14.316770Z",
+ "start_time": "2021-01-05T06:06:14.164245Z"
+ },
+ "scrolled": false
+ },
+ "outputs": [],
+ "source": [
+ "policy = physbo.search.discrete_multi.policy(test_X=test_X, num_objectives=2)\n",
+ "policy.set_seed(0)\n",
+ "\n",
+ "res_random = policy.random_search(max_num_probes=50, simulator=simu)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The evaluation value of the objective function (the array) and the action ID at that time are displayed. \n",
+ "It also displays a message when the Pareto set is updated.\n",
+ "\n",
+ "If you want to display the contents of the Pareto set when it is updated, specify `disp_pareto_set=True`. \n",
+ "Pareto set is sorted in ascending order of the first objective function value. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-01-05T06:06:14.493398Z",
+ "start_time": "2021-01-05T06:06:14.318132Z"
+ },
+ "scrolled": false
+ },
+ "outputs": [],
+ "source": [
+ "policy = physbo.search.discrete_multi.policy(test_X=test_X, num_objectives=2)\n",
+ "policy.set_seed(0)\n",
+ "res_random = policy.random_search(max_num_probes=50, simulator=simu, disp_pareto_set=True)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Checking results\n",
+ "\n",
+ " #### History of evaluation values"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-01-05T06:06:14.498984Z",
+ "start_time": "2021-01-05T06:06:14.494679Z"
+ },
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "res_random.fx[0:res_random.num_runs]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Obtaining the Pareto solution"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-01-05T06:06:14.504080Z",
+ "start_time": "2021-01-05T06:06:14.500385Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(array([[-0.95713719, -0.09067194],\n",
+ " [-0.92633083, -0.29208351],\n",
+ " [-0.63329589, -0.63329589],\n",
+ " [-0.52191048, -0.72845916],\n",
+ " [-0.26132949, -0.87913689],\n",
+ " [-0.17190645, -0.91382463]]),\n",
+ " array([40, 3, 19, 16, 29, 41]))"
+ ]
+ },
+ "execution_count": 14,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "front, front_num = res_random.export_pareto_front()\n",
+ "front, front_num"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Plotting the solution (evaluated value)\n",
+ "\n",
+ "Note again that the space to be plotted is $y = (y_1, y_2)$ and not $x = (x_1, x_2)$.\n",
+ "\n",
+ "The red plot is the Pareto solution."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-01-05T06:06:14.511086Z",
+ "start_time": "2021-01-05T06:06:14.505221Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "def plot_pareto_front(res):\n",
+ " front, front_num = res.export_pareto_front()\n",
+ " dominated = [i for i in range(res.num_runs) if i not in front_num]\n",
+ " points = res.fx[dominated, :]\n",
+ "\n",
+ " plt.figure(figsize=(7, 7))\n",
+ " plt.scatter(res.fx[dominated,0], res.fx[dominated,1], c = \"blue\")\n",
+ " plt.scatter(front[:, 0], front[:, 1], c = \"red\")\n",
+ " plt.title('Pareto front')\n",
+ " plt.xlabel('Objective 1')\n",
+ " plt.ylabel('Objective 2')\n",
+ " plt.xlim([-1.0,0.0])\n",
+ " plt.ylim([-1.0,0.0])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-01-05T06:06:14.661288Z",
+ "start_time": "2021-01-05T06:06:14.512392Z"
+ },
+ "scrolled": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAcoAAAG5CAYAAAAOKnSzAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8QVMy6AAAACXBIWXMAAAsTAAALEwEAmpwYAAAkTElEQVR4nO3de3QkZ3nn8e+jscdG2MSegRhzkcTFXEyGEKyYWwgcYnNINmDD4RJWAfuQMJtlycnZhBBnxUKWRIFwyW0J50SB4AvKcrHBNpdgxnaAkJgETSA4Ngk2xBK+MtgQMIKM8Tz7R5U8PbJU6ml1d3VXfz/n6HRXdXX1o5qRfqq33nrfyEwkSdL6xuouQJKkQWZQSpJUwaCUJKmCQSlJUgWDUpKkCgalJEkVDEppREXECRHxmYj4bkS8ve56pEFlUEpdFhE3RMT3I+LOiLgtIs6NiGN68DnnRsTvbWEXu4FvAvfLzN/oUln3KI/Dad3er9RvBqXUG8/NzGOAJwLTwOsO581R6PXP5yRwbW4w6khEHNHjz5eGgkEp9VBm3gT8NfBjEXF8RHw0IvZFxLfK5w9Z3TYiPhURcxHxd8AK8PCIeExE7ImIOyLi3yLixeW2u4EZ4LXlmetHyvWPLffz7Yi4JiKet15dEXEucFbL+0+LiN+JiAsj4r0R8R3g7Ih4UERcWn7+9RHxypZ9/E5EfCAizi+bb6+JiOnytQuACeAj5f5f2/2jK/WHQSn1UEQ8FPg54AsUP2/voTiTmwC+D7xjzVteRtEkeiywD9gD/BXwo8AvAO+MiJMzcx5YAN6Smcdk5nMj4kjgI8Any+1/FViIiEevrSszz17z/svLl84ALgSOK19/H3Aj8CDghcDvR8SzWnb1vHKb44BLV7+fzHwZsEx5Zp2ZbzmsAycNEINS6o2LI+LbwGeBTwO/n5m3Z+ZFmbmSmd8F5oBnrHnfuZl5TWb+EHgOcENmviczf5iZXwAuAl60wWc+GTgGeHNm7s/MK4GPAi89jLqvysyLM/MAcH/gacBvZeYPMvOLwLuAl7ds/9nM/Hhm3g1cAPz4YXyWNBS8BiH1xpktZ2kARMQ48EcUAXh8ufrYiNhWBg3A11veMgk8qQzcVUdQBNJ6HgR8vQy5VUvAgw+j7tbPfxBwRxnqrfubblm+teX5CnB0RBxRBr3UCAal1D+/ATwaeFJm3hoRT6Boko2WbVo71nwd+HRmnr7B/tZ2wrkZeGhEjLWE5QTwlcOosXWfNwM7IuLYlrCcAG7qYF/S0LLpVeqfYymuS347InYAb9hk+48Cj4qIl0XEkeXXT0bEY8vXbwMe3rL9P1Cc1b223PaZwHMpriEetsz8OvD3wJsi4uiIeDzwS8B729zF2vqkoWRQSv3zx8B9KO5d/BzwiaqNy7O4Z1N04rmZopnzD4Cjyk3eDZxc9nC9ODP3UwTjz5af8U7g5Zn5r1uo+aXAVPn5HwbesLZJucKbgNeV9b1mCzVItQonbpYkaWOeUUqSVKHWoIyI55Q3UV8fEees8/pREfH+8vV/iIipGsqUJI2w2oIyIrYBf0ZxPeVk4KURcfKazX4J+FZmPpKiW/0f9LdKSdKoq/OM8lTg+sz8WtkJ4X0Uo4K0OgM4r3x+IfAzERFIktQndd5H+WAOvbn5RuBJG22TmT+MiP8AdlL06LtHOe7lboD73ve+pzzmMY/pVc2SpCG0d+/eb2bmAzp5byMGHCjHvZwHmJ6ezsXFxZorkiQNkohY6vS9dTa93gQ8tGX5Idx7xI97timn/PkR4Pa+VCdJEvUG5eeBkyLiYRGxneKm6kvXbHMpxVRAUMxccOVGc+dJktQLtTW9ltccXw1cBmwD/jIzr4mINwKLmXkpxcgjF0TE9cAdFGEqSVLf1HqNMjM/Dnx8zbrXtzz/ARtPKSRJUs85Mo8kSRUMSkmSKhiUkiRVMCglSapgUEqSVMGglCSpgkEpSVIFg1KSpAoG
pSRJFQxKSZIqGJSSJFUwKCVJqmBQSpJUwaCUJKmCQSlJUgWDUpKkCgalJEkVDEpJkioYlJIkVTAoJUmqYFBKklTBoJQkqYJBKUlSBYNSkqQKBqUkSRUMSkmSKhiUrRYWYGoKxsaKx4WFuiuSJNXsiLoLGBgLC7B7N6ysFMtLS8UywMxMfXVJkmrlGeWq2dmDIblqZaVYL0kaWQblquXlw1svSRoJBuWqiYnDWy9JGgkG5aq5ORgfP3Td+HixXpI0sgzKVTMzMD8Pk5MQUTzOz9uRR5JGnL1eW83MGIySpEN4RilJUgWDUpKkCgalJEkVDEpJkioYlJIkVTAoJUmqYFBKklTBoJQkqYJBKUlSBYNSkqQKBqUkSRUMSkmSKhiUkiRVMCglSapgUEqSVMGglCSpgkEpSVIFg1KSpAoGpSRJFQxKSZIqGJSSJFUwKCVJqmBQSpJUwaCUJKmCQSlJUoXGBeXVV8PYGExNwcJC3dVIkobdEXUX0G379xePS0uwe3fxfGamvnokScOtcWeUrVZWYHa27iokScOs0UEJsLxcdwWSpGHW+KCcmKi7AknSMGt0UI6Pw9xc3VVIkoZZ44Jy+3aIgMlJmJ+3I48kaWsa1+t11y5YXKy7CklSU9RyRhkROyJiT0RcVz4ev842T4iIqyLimoj4UkS8pI5aJUmjra6m13OAKzLzJOCKcnmtFeDlmfk44DnAH0fEcf0rUZKk+oLyDOC88vl5wJlrN8jMr2TmdeXzm4FvAA/oV4GSJEF9QXlCZt5SPr8VOKFq44g4FdgOfHWD13dHxGJELO7bt6+7lUqSRlrPOvNExOXAA9d56ZCxcjIzIyIr9nMicAFwVmYeWG+bzJwH5gGmp6c33JckSYerZ0GZmadt9FpE3BYRJ2bmLWUQfmOD7e4HfAyYzczP9ahUSZI2VFfT66XAWeXzs4BL1m4QEduBDwPnZ+aFfaxNkqR71BWUbwZOj4jrgNPKZSJiOiLeVW7zYuCngbMj4ovl1xNqqVaSNLIis1mX9Kanp3PREQckSS0iYm9mTnfy3sYNYSdJUjcZlJIkVTAoJUmqYFBKklTBoJQkqYJBKUlSBYNSkqQKBqUkSRUMSkmSKhiUkiRVMCglSapgUEqSVMGglCSpgkEpSVIFg1KSpAoGpSRJFQxKSZIqGJSSJFUwKCVJqmBQSpJUwaCUJKmCQSlJUgWDUpKkCgalJEkVDEpJkioYlJIkVTAot2JhAaamYGyseFxYqLsiSVKXHVF3AUNrYQF274aVlWJ5aalYBpiZqa8uSVJXeUbZqdnZgyG5amWlWC9JagyDslPLy4e3XpI0lAzKTk1MHN56SdJQMig7NTcH4+OHrhsfL9ZLkhrDoOzUzAzMz8PkJEQUj/PzduSRpIax1+tWzMwYjJLUcJ5RSpJUwaCUJKmCQSlJUgWDUpKkCgalJEkVDEpJkioYlJIkVTAoJUmqYFBKklTBoJQkqYJBKUlSBYNSkqQKBqUkSRUMSkmSKhiUkiRVMCglSapgUEqSVMGglCSpgkEpSVIFg1KSpAoGpSRJFQxKSZIqGJSSJFUwKCVJqmBQSpJUwaCUJKmCQSlJUgWDUpKkCgalJEkVDEpJkioYlJIkVTAoJUmqYFBKklShlqCMiB0RsScirisfj6/Y9n4RcWNEvKOfNUqSBPWdUZ4DXJGZJwFXlMsb+V3gM32pSpKkNeoKyjOA88rn5wFnrrdRRJwCnAB8sj9lSZJ0qLqC8oTMvKV8fitFGB4iIsaAtwOv2WxnEbE7IhYjYnHfvn3drVSSNNJ6FpQRcXlE/Ms6X2e0bpeZCeQ6u3gV8PHMvHGzz8rM+cyczszpW299AGNjMDUFCwvd+V4kSaPriF7tODNP2+i1iLgtIk7MzFsi4kTgG+ts9hTg6RHxKuAYYHtE3JmZVdcz2b+/eFxagt27i+czMx19C5Ik1db0eilwVvn8LOCStRtk5kxmTmTmFEXz6/mbheRaKyswO7vV
UiVJo6yuoHwzcHpEXAecVi4TEdMR8a5uftDycjf3JkkaNVFcImyOiOmExXuWJyfhhhvqq0eSVL+I2JuZ0528t9Ej84yPw9xc3VVIkoZZ44Jy+3aIKM4k5+ftyCNJ2pqe9Xqty65dsLi4+XaSJLWjcWeUkiR1k0EpSVIFg1KSpAoGpSRJFTYMynIeyDdFxAUR8V/XvPbO3pcmSVL9qs4o3wMEcBHwCxFxUUQcVb725J5XJknSAKgKykdk5jmZeXFmPg/4J+DKiNjZp9okSapd1X2UR0XEWGYeAMjMuYi4CfgMxWwekiQ1XtUZ5UeAZ7WuyMxzgd8A9vewJkmSBsaGZ5SZ+doN1n8COKlnFUmSNEC8PUSSpAoGpSRJFQxKSZIqbBqUETEeEf87Iv6iXD4pIn6+96VJklS/ds4o3wP8J/CUcvkm4Pd6VlGXLCzA1BSMjRWPCwt1VyRJGkbtBOUjMvMtwF0AmblCMWLPwFpYgN27YWkJMovH3bsNS0nS4WsnKPdHxH2ABIiIR1CcYQ6s2VlYWTl03cpKsV6SpMNRNTLPqt8BPgE8NCIWgKcBZ/ewpi1bXj689ZIkbWTToMzMT0bEXoqB0AP4tcz8Zs8r24KJiaK5db31kiQdjnZ6vX4EeDbwqcz86KCHJMDcHIyPH7pufLxYL0nS4WjnGuXbgKcD10bEhRHxwog4usd1bcnMDMzPw+QkRBSP8/PFekmSDkdkZnsbRmyjGCT9lcBzMvN+vSysU9PT07m4uFh3GZKkARIRezNzupP3ttOZh7LX63OBlwBPBM7r5MMkSRo2mwZlRHwAOJWi5+s7gE+vzlEpSVLTtXNG+W7gpZl5d6+LkSRp0GwYlBHxrMy8ErgvcEbEoYPxZOaHelybJEm1qzqjfAZwJcW1ybUSMCglSY23YVBm5hvKp2/MzH9vfS0iHtbTqiRJGhDt3Ed50TrrLux2IZIkDaKqa5SPAR4H/EhEvKDlpfsBAz3ggCRJ3VJ1jfLRwM8Dx3HodcrvUgw6IElS41Vdo7wEuCQinpKZV/WxJkmSBkY71yh/JSKOW12IiOMj4i97V5IkSYOjnaB8fGZ+e3UhM78F/ETPKpIkaYC0E5RjEXH86kJE7KDNMWIlSRp27QTe24GrIuKD5fKLAGd2lCSNhE3PKDPzfOAFwG3l1wsy84JeF9apq6+GsTGYmoKFhbqrkSQNu3aaXgF2AN/LzHcA+wZ5ZJ79+yETlpZg927DUpK0NZsGZUS8Afgt4LfLVUcC7+1lUd2ysgKzs3VXIUkaZu2cUT4feB7wPYDMvBk4tpdFddPyct0VSJKGWTtBuT8zk2LGECLivr0tqbsmJuquQJI0zNoJyg9ExJ8Dx0XEK4HLgb/obVndMT4Oc/bPlSRtwaa3h2Tm2yLidOA7FOO/vj4z9/S8sg5t3w533VWcSc7NwcxM3RVJkoZZWwMHlME4sOHYatcuWFysuwpJUlNs2PQaEZ8tH78bEd9Z5+vfI+JV/StVkqT+q5o95KfKx3V7uEbETuDvgXf2pjRJkurXVtNrRDwR+CmKnq+fzcwvZObtEfHMHtYmSVLt2hlw4PXAecBO4P7AuRHxOoDMvKW35UmSVK92zihngB/PzB8ARMSbgS8Cv9fDuiRJGgjt3Ed5M3B0y/JRwE29KUeSpMGy4RllRPxfimuS/wFcExF7yuXTgX/sT3mSJNWrqul19W7EvcCHW9Z/qmfVSJI0YKpuDzkPICKOBh5Zrr5+9VqlJEmjoGrAgSMi4i3AjRS9Xs8Hvh4Rb4mII/tVoCRJdarqzPNWigmbH5aZp2TmE4FHAMcBb+tDbZIk1a4qKH8eeGVmfnd1RWZ+B/jvwM/1ujBJkgZBVVBmOQ/l2pV3U85NKUlS01UF5bUR8fK1KyPiF4F/7V1JkiQNjqrbQ/4H8KGIeAXFLSIA08B9gOf3ujBJkgZB1e0hNwFPiohnAY8rV388M6/oS2WSJA2ATcd6zcwrgSv7UIskSQOn
nbFeJUkaWQalJEkVDEpJkioYlJIkVaglKCNiR0TsiYjrysfjN9huIiI+GRFfjohrI2Kqz6VKkkZcXWeU5wBXZOZJwBXl8nrOB96amY8FTgW+0af6JEkC6gvKMyhmJKF8PHPtBhFxMnBEZu4ByMw7M3OlbxVKkkR9QXlCZt5SPr8VOGGdbR4FfDsiPhQRX4iIt0bEtvV2FhG7I2IxIhb37dvXq5olSSNo0wEHOhURlwMPXOel2daFzMyIWG+Q9SOApwM/ASwD7wfOBt69dsPMnAfmAaanpx2wXZLUNT0Lysw8baPXIuK2iDgxM2+JiBNZ/9rjjcAXM/Nr5XsuBp7MOkEpSVKv1NX0eilwVvn8LOCSdbb5PHBcRDygXH4WcG0fapMk6R51BeWbgdMj4jrgtHKZiJiOiHfBPfNevga4IiKuBgL4i5rqlSSNqFhnbuahdtRR03nXXYtMTMDcHMzM1F2RJKluEbE3M6c7eW/PrlHWZf/+4nFpCXbvLp4blpKkTjV6CLuVFZid3Xw7SZI20uigBFhevve6hQWYmoKxseJxYaHfVUmShkXjg3Ji4tDlhYWiSXZpCTIPNtEaliPOv54kbaBxnXkiphMWARgfh/n5Q69RTk0V4bjW5CTccENfStSgWf3raaVlhMT1/vNIGlpb6czTuKDcrNfr2FhxJrlWBBw40J8aNWD860lqPHu9tti1CxYXN359YmL934lrm2g1Qta7kF21XtJIafw1yrXm5opWtVbj48V6jaiN/kryrydJjGBQzswUl54mJ4vm1slJL0WNPP96klShcU2v7ZiZMRjVYvU/w+xs0dzqsE6SWoxkUEr34l9PkjYwck2vkiQdjpEJSu8nlyR1YiSaXtfeT+6A6ZKkdo3EGeXs7KGDroADpkuS2jMSQen95JKkTo1EUHo/uSSpUyMRlN5PLknq1EgEpaPxSJI6NRK9XsH7ySVJnRmJM0pJkjplUEqSVMGglCSpgkEpSVIFg1KSpAoGpSRJFQxKSZIqGJSSJFUwKCVJqmBQSpJUwaCUJKmCQSlJUgWDUpKkCgalJEkVDMotWFiAqSkYGyseFxbqrkiS1G0jMx9lty0swO7dsLJSLC8tFcvgvJeS1CSeUXZodvZgSK5aWSnWS5Kaw6Ds0PLy4a2XJA0ng7JDExOHt16SNJwMyg7NzcH4+KHrxseL9VJP2YtM6iuDskMzMzA/D5OTEFE8zs/bkUc9ttqLbGkJMg/2IjMspZ6JzKy7hq6anp7OxcXFusuQemNqqgjHtSYn4YYb+l2NNDQiYm9mTnfy3sadUV59tS1SajB7kUl917ig3L/fFik1mL3IpL5rXFC28r5GNY69yKS+a3RQgi1Sahh7kUl917jOPBHTCQc789jHQZJkZ54N2CIlSdqqxgXl9u22SEmSuqdxs4fs2gXeRilJ6pbGnVHecYeje0mSuqdxZ5RLS3DgwMHnzhEpSdqKxp1RrobkKu+llCRtReOCcj3eSylJ6tRIBKWje0mSOtW4oBxb8x15L6UkaSsaF5STk47uNQicW1hSUzSu1+uOHd5HWbfVuYVXVoplex9LGmaNO6NU/WZnD4bkKnsfSxpWjQvKvXtt6qubcwtLapLGBSU4aXPdnFtYUpM0MijBpr46ObewpCZpbFCCTX11cW5hSU3S6ImbnbRZkgRO3Lwum/rUT943KjVX4+6jhOJMcm7Opj71h/eNSs3WuKbX6enpXHTEAfXR1FQRjmvZ9C8NjqFreo2IHRGxJyKuKx+P32C7t0TENRHx5Yj404iIXtdmE5oOl/eNSs1W1zXKc4ArMvMk4Ipy+RAR8VTgacDjgR8DfhJ4Ri+LWm1CW1qCTO/HVHu8b1RqtrqC8gzgvPL5ecCZ62yTwNHAduAo4Ejgtl4W5dBr6oT3jUrNVldQnpCZt5TPbwVOWLtBZl4F/A1wS/l1WWZ+eb2dRcTuiFiMiMV9+/Z1XJRNaOqE941KzdazXq8RcTnwwHVeOuT8
LDMzIu7VoygiHgk8FnhIuWpPRDw9M/927baZOQ/MQ9GZp9OaJybW75RhE5o2MzNjMEpN1bOgzMzTNnotIm6LiBMz85aIOBH4xjqbPR/4XGbeWb7nr4GnAPcKym6Zmzu0mz/YhCZJo66uptdLgbPK52cBl6yzzTLwjIg4IiKOpOjIs27Ta7fYhCZJWquW+ygjYifwAWACWAJenJl3RMQ08CuZ+csRsQ14J/DTFB17PpGZv77Zvr2PUpK01lbuo6xlZJ7MvB34mXXWLwK/XD6/G/hvfS5NkqRDNHasV0mSusGglCSpgkEpSVIFg1KSpAoGpSRJFQxKSZIqGJSSJFUwKCVJqtC4oLz6aiddliR1T+OCcv/+e0+6vLBQBKcBKkk6XLWM9dpLEdMJB8d63bkTvv/9e88I4mDnkjQ6tjLWa+POKNe6/fZDQxKK5dnZ9beXJKlV44NyI8vLdVcgSRoGIxuUExN1VyBJGgYjGZTj4zA3V3cVkqRhMBJBuXMnTE5CRPFoRx5JUrtqmbi5l8bG4MCBg8vj4/Anf2IwSpI607gzyoiDz3fu9OxRkrQ1jQvKu+8++Pz736+vDklSMzQuKFt5v6QkaasaHZTg/ZKSpK1pfFB6v6SkvnBQ6cZqXK/XVt4vKakvFhaKWRhWx8tcnZUB7E3YAI0bFP2oo6bzrrsWmZgoQtL/o5J6bmqqCMe1Jifhhhv6XY3WsZVB0Rt3RrlrFywubr6dJHXNRp0h7CTRCI2/RilJPbdRZwg7STSCQSlJWzU3V3SKaGUnicYwKCVpq2ZmimHAHFS6kRp3jVKSajEzYzA2lGeUkiRVMCglSapgUEqSVMGglCSpgkEpSVIFg1KSmsYB2rvK20MkqUkcoL3rPKOUpCaZnT0YkqucxX5LDEpJahIHaO86g7JNNvlLGgoO0N51BmUbVpv8l5Yg82CTv2EpaeA4QHvXGZRtsMlf0tBwgPaui8ysu4aump6ezsUuz9w8NlacSa4VAQcOdPWjJEk9EBF7M3O6k/c27ozy6qu7fx3RJn9JdbOfRH0aF5T793f/OqJN/pLqZD+JejWu6TViOuFg0+vkJNxww9b3u7BQXJNcXi7OJOfmbPKX1B9TU0U4rtWt32+jYCtNr40PSlj/+qIkDQv7SWyd1ygrbNtm276k4WY/iXo1Pijvvtu2fUnDzX4S9Wp8UG7b5j2Qkoabt0bWq9HXKMfH7x2SB7ezbV+SRoXXKFts337oX1yTk+tvZ9u+JKkdjZuPctcuWDswT+vUbGDbviSpfY07o1zLtn1J0lY07oxyPTMzBqMkqTONO6PsxVivkqTR1bgzyv37i8elJXjFK4rnnk1KkjrVuDPKVvv3w6/9Wt1VSJKGWaODEuD22+uuQJI0zBoflJJGj+M7q5sad41yrZ07665AUj+tzt24eu/06vjOYH8FdabxZ5QvfnHdFUjqp9lZx3dWdzU+KM87z2YXaZQsLx/eemkzjQ9K/5KURotzN6rbGh+UUFyjkDQanLtR3TYSQbltm73gpFHh+M7qtkbPR9lq7dyU4+P+8EgaPgsLxeWk5eWiOXluzt9j7XA+yk1E2AtOOly2wgye1VtflpYg8+CtL/7b9FYtQRkRL4qIayLiQERsmPAR8ZyI+LeIuD4izun08zY6aR7WXnDd+gW22X6G4RdluzX2+3sZhmNXpd+/kIf9ePWLt77UJDP7/gU8Fng08ClgeoNttgFfBR4ObAf+GTh5832fksWP9uZfk5M5dN773szx8UO/j/HxYn0399Otz+mldmvs9/cyDMduM5OT/fuZacLx6peI9f9dIuqubPABi9lpZnX6xm58bRKUTwEua1n+beC3N99ne0E5rD+I3foFttl++vmLslPt1tjv72UYjt1m+vkLuQnHq188Vp3bSlDW2pknIj4FvCYz79X7JiJeCDwnM3+5XH4Z8KTMfPU62+4GykGqjjoFfmydT7v7h3DgABy5He7aDzffBN+8o3vfTb+c
csrGr+3du7X97AMeUO6nW5/TS+3W2PPv5f7AN/v4eX3w+F3Fz8pad+2HL13dpQ8pj1sTjle/3H8H3GcKfjQOrssDsLw0nL/P+urRmXlsJ2/s2VivEXE58MB1XprNzEu6+VmZOQ/Ml5+7mLnYUc+mUVccuyWP3WEqjltnvelGmcetM8VxW/a4HaaIuPftEG3qWVBm5mlb3MVNwENblh9SrpMkqW8G+faQzwMnRcTDImI78AvApTXXJEkaMXXdHvL8iLiRosPOxyLisnL9gyLi4wCZ+UPg1cBlwJeBD2TmNW3sfr5HZY8Cj11nPG6d8bh1xuPWmY6PW+NG5pEkqZsGuelVkqTaGZSSJFUY+qDs93B4TRIROyJiT0RcVz4ev8F2bymP8Zcj4k8jItbbblQcxnGbiIhPlsft2oiY6nOpA6Xd41Zue7+IuDEi3tHPGgdRO8ctIp4QEVeVP6dfioiX1FHrINjsd31EHBUR7y9f/4d2fi6HPiiBfwFeAHxmow0iYhvwZ8DPAicDL42Ik/tT3kA7B7giM08CriiXDxERTwWeBjyeYiSHnwSe0c8iB9Cmx610PvDWzHwscCrwjT7VN6jaPW4Av0vFz/SIaee4rQAvz8zHAc8B/jgijutfiYOhzd/1vwR8KzMfCfwR8Aeb7XfogzIzv5yZ/7bJZqcC12fm1zJzP/A+4IzeVzfwzgDOK5+fB5y5zjYJHE0x3u5RwJHAbf0oboBtetzKH84jMnMPQGbemZkra7cbMe38fyMiTgFOAD7Zn7IG3qbHLTO/kpnXlc9vpvij7AH9KnCAtPO7vvV4Xgj8zGatZEMflG16MPD1luUby3Wj7oTMvKV8fivFL6dDZOZVwN8At5Rfl2Xml/tX4kDa9LgBjwK+HREfiogvRMRby792R9mmxy0ixoC3A6/pZ2EDrp3/b/eIiFMp/rD9aq8LG0Dt/K6/Z5vyNsT/AHZW7bRnI/N0Uz+Hw2uaqmPXupCZGRH3ulcoIh5JMdvLQ8pVeyLi6Zn5t10vdoBs9bhR/Gw9HfgJYBl4P3A28O7uVjpYunDcXgV8PDNvHKVL4V04bqv7ORG4ADgrMw90t8rRNRRB6XB4nas6dhFxW0ScmJm3lD9g611Dez7wucy8s3zPX1MMFNHooOzCcbsR+GJmfq18z8XAk2l4UHbhuD0FeHpEvAo4BtgeEXdmZqM74HXhuBER9wM+RnEC8bkelTro2vldv7rNjRFxBPAjwO1VOx2VpleHw1vfpcBZ5fOzgPXOzpeBZ0TEERFxJEVHnlFvem3nuH0eOC4iVq8TPQu4tg+1DbJNj1tmzmTmRGZOUTS/nt/0kGzDpset/L32YYrjdWEfaxs07fyubz2eLwSuzM1G3ul0fq5B+aI447kR+E+KTiaXlesfRNGEs7rdzwFfoWi3n6277kH4omiXvwK4Drgc2FGunwbeVT7fBvw5RTheC/xh3XXX/dXOcSuXTwe+BFwNnAtsr7v2YThuLdufDbyj7rrr/mrz5/QXgbuAL7Z8PaHu2ms6Xvf6XQ+8EXhe+fxo4IPA9cA/Ag/fbJ8OYSdJUoVRaXqVJKkjBqUkSRUMSkmSKhiUkiRVMCglSapgUEp9FBEPiYhLypkgvhoRf1Le70VEnL3RbBkR8fcdft6ZrYNCR8QbI2KrA3i0PWuP1AQGpdQn5cDLHwIuzmImiEdRjD4zt9l7M/OpHX7smRSzKKzu5/WZeXmH+2q16aw9UlMYlFL/PAv4QWa+ByAz7wb+J/CKiBgvt3loRHyqPON8w+obI+LOlue/GRGfL+cd/D8t619ervvniLignCLtecBbI+KLEfGIiDg3Il5Yztn3wZb3PjMiPlo+f3Y5t+E/RcQHI+KYtd9Itjdrj9QIQzHWq9QQjwP2tq7IzO9ExDLwyHLVqRTzfq4An4+Ij2Xm4ur2EfFs4KRyuwAujYifphir8nXAUzPzmxGxIzPviIhLgY9mOaxZy0DjlwPzEXHfzPwe8BLg
fRFx/3I/p2Xm9yLit4BfpxjZRBpJBqU0WPZk5u0AEfEh4KeAxZbXn11+faFcPoYiOH8c+GBmfhMgM++o+pDM/GFEfAJ4bkRcCPwX4LUUY/meDPxdGarbgau6861Jw8mglPrnWopBmO9RzvgwQTHu5BMpJsputXY5gDdl5p+v2c+vdlDP+4BXA3cAi5n53fI66p7MfGkH+5MayWuUUv9cAYxHxMsByomc3w6cm5kr5TanR8SOiLgPRUecv1uzj8sormkeU+7jwRHxo8CVwIsiYme5fke5/XeBYzeo59MU4fxKitAE+BzwtHIeUiLivhHxqC18z9LQMyilPsliBoLnUwTadRQzHPwA+F8tm/0jcBHFrCMXtVyfzHIfnwT+CrgqIq4GLgSOzcxrKHrPfjoi/hn4w/J97wN+MyK+EBGPWFPP3cBHgZ8tH8nMfRSzdvy/iPgSRbPrY9Z+LxHx/Ii4kWL+yI9FxGUdHxhpwDl7iDTgyrPEf8rMybprkUaRZ5TSAIuIB1Gc1b2t7lqkUeUZpSRJFTyjlCSpgkEpSVIFg1KSpAoGpSRJFQxKSZIq/H9/YsaKiEZ7oAAAAABJRU5ErkJggg==",
+ "text/plain": [
+ "
"
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "plot_pareto_front(res_random)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Calculate the volume of the dominated region\n",
+ "\n",
+ "A solution that is not a Pareto solution, i.e., a solution $y$ for which there exists a solution $y'$ that is better than itself, is called a inferior solution ($\\exists y' y\\prec y'$). The volume of the inferior solution region, which is the space occupied by inferior solutions in the solution space (a subspace of the solution space), is one of the indicators of the results of multi-objective optimization. The larger this value is, the more good Pareto solutions are obtained.`res_random.pareto.volume_in_dominance(ref_min, ref_max)` calculates the volume of the inferior solution region in the hyper-rectangle specified by `ref_min` and `ref_max`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-01-05T06:06:14.666649Z",
+ "start_time": "2021-01-05T06:06:14.662809Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.2376881844865093"
+ ]
+ },
+ "execution_count": 17,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "res_random.pareto.volume_in_dominance([-1,-1],[0,0])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Bayesian optimization\n",
+ "\n",
+ "For `bayes_search` in the multi-objective case, `score` can be selected from the following method\n",
+ "\n",
+ "- HVPI (HyperVolume-based Probability of Improvement)\n",
+ "- EHVI (Expected Hyper-Volume Improvement)\n",
+ "- TS (Thompson Sampling)\n",
+ "\n",
+ "The following 50 evaluations (10 random searches + 40 Bayesian optimizations) will be performed with different scores."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### HVPI (HyperVolume-based Probability of Improvement)\n",
+ "\n",
+ "The improvement probability of a non-dominated region in a multi-dimensional objective function space is obtained as a score. \n",
+ "\n",
+ "- Reference\n",
+ " - Couckuyt, Ivo, Dirk Deschrijver, and Tom Dhaene. \"Fast calculation of multiobjective probability of improvement and expected improvement criteria for Pareto optimization.\" Journal of Global Optimization 60.3 (2014): 575-594."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-01-05T06:06:29.019463Z",
+ "start_time": "2021-01-05T06:06:14.668034Z"
+ },
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "policy = physbo.search.discrete_multi.policy(test_X=test_X, num_objectives=2)\n",
+ "policy.set_seed(0)\n",
+ "\n",
+ "policy.random_search(max_num_probes=10, simulator=simu)\n",
+ "res_HVPI = policy.bayes_search(max_num_probes=40, simulator=simu, score='HVPI', interval=10)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Plotting the Pareto solution\n",
+ "\n",
+ "We can see that more Pareto solutions are obtained compared to random sampling."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2021-01-05T06:06:29.190434Z",
+ "start_time": "2021-01-05T06:06:29.020967Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAcoAAAG5CAYAAAAOKnSzAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8QVMy6AAAACXBIWXMAAAsTAAALEwEAmpwYAAAkvElEQVR4nO3deZhld13n8fenyQJNwCSAISzVzRKWIIik2EV8MGHQERIURaaU5FHpcRiZeWZExGlGGLAFQdwGeR5LkAQohyUsCYuELAIuQamWzQQhgOkmEELYhNBigHznj3MqXV2punXr1t3v+/U89dx7zj333F+dp+t++nfO7/c9qSokSdL6doy6AZIkjTODUpKkDgxKSZI6MCglSerAoJQkqQODUpKkDgxKaUYlOSnJB5J8M8nLR90eaVwZlFKfJbk6yb8luSHJdUnOTXLcAD7n3CS/vY1d7AG+DNy+qn6tT826WXscTu/3fqVhMyilwXhiVR0HPASYB563lTenMei/z13AlbVB1ZEkRw3486WJYFBKA1RVnwf+EviBJCckeWeS65N8rX1+t5Vtk7wvyb4kfwscAu6Z5H5JLk7y1SSfTPKz7bZ7gAXgOW3P9R3t+vu3+/l6kiuSPGm9diU5Fzh71ftPT/KCJOcneX2SbwDnJLlLkgvbz/90kmes2scLkrwpyWvb07dXJJlvX3sdMAe8o93/c/p/dKXhMCilAUpyd+AngA/T/L29hqYnNwf8G/CKNW/5BZpTorcDrgcuBv4C+H7g54BXJjm1qhaBJeClVXVcVT0xydHAO4D3tts/C1hKct+17aqqc9a8/5L2pTOB84Hj29ffAFwD3AV4CvA7SR63aldParc5Hrhw5fepql8ADtL2rKvqpVs6cNIYMSilwXh7kq8DfwO8H/idqvpKVb2lqg5V1TeBfcBj17zv3Kq6oqq+CzwBuLqqXlNV362qDwNvAX5mg898BHAc8JKqurGqLgPeCTxtC+2+vKreXlU3AXcEHg38RlV9u6o+ArwKePqq7f+mqt5dVd8DXgf84BY+S5oIXoOQBuOsVb00AJLsBP6AJgBPaFffLsmt2qAB+Nyqt+wCHt4G7oqjaAJpPXcBPteG3IoDwF230O7Vn38X4KttqK/e3/yq5S+uen4IuHWSo9qgl6aCQSkNz68B9wUeXlVfTPJgmlOyWbXN6oE1nwPeX1VnbLC/tYNwvgDcPcmOVWE5B3xqC21cvc8vACcmud2qsJwDPt/DvqSJ5alXaXhuR3Nd8utJTgSev8n27wTuk+QXkhzd/jw0yf3b168D7rlq+7+n6dU9p932R4En0lxD3LKq+hzwd8CLk9w6yYOAXwJe3+Uu1rZPmkgGpTQ8fwjchmbu4geB93TauO3FPZ5mEM8XaE5z/i5wbLvJq4FT2xGub6+qG2mC8cfbz3gl8PSq+udttPlpwO72898GPH/tKeUOXgw8r23fs7fRBmmk4o2bJUnamD1KSZI6GGlQJnlCO4n600meu87rxyZ5Y/v63yfZPYJmSpJm2MiCMsmtgD+huZ5yKvC0JKeu2eyXgK9V1b1phtX/7nBbKUmadaPsUT4M+HRVfbYdhPAGmqogq50JnNc+Px/4sSRBkqQhGeU8yrty5OTma4CHb7RNVX03yb8Cd6AZ0Xeztu7lHoDb3va2p93vfvc7/OL+/Ru34LTTem27JGmC7N+//8tVdade3jsVBQfaupeLAPPz87W8vHz4xd274cCBW75p1y5YvZ0kaWolWScIujPKU6+fB+6+avlu3LLix83btLf8+T7gK1v6lH37YOfOI9ft3NmslyRpE6MMyg8BpyS5R5JjaCZVX7hmmwtpbgUEzZ0LLtvo3nkbWliAxcWmB5k0j4uLzXpJkjYxslOv7TXHXwUuAm4F/HlVXZHkhcByVV1IU3nkdUk+DXyVJky3bmHBYJQk9WSk1yir6t3Au9es+61Vz7/NxrcUkiRp4KzMI0lS
BwalJEkdGJSSJHVgUEqS1MHsBuXSUlOMYMeO5nFpadQtkiSNoamozLNlS0uwZw8cOtQsHzjQLIPTSCRJR5jNHuXevYdDcsWhQ816SZJWmc2gPHhwa+slSTNrNoNybm5r6yVJM2s2g9JC6ZKkLs1mUFooXZLUpdkc9QoWSpckdWU2e5SSJHXJoJQkqQODUpKkDgxKSZI6MCglSerAoNyIRdMlSczy9JBOLJouSWrZo1yPRdMlSS2Dcj0WTZcktQzK9Vg0XZLUMijXY9F0SVLLoFyPRdMlSS1HvW7EoumSJOxRSpLUkUEpSVIHBqUkSR0YlJIkdWBQSpLUgUEpSVIHBqUkSR0YlJIkdWBQSpLUgUEpSVIHBuVWLS3B7t2wY0fzuLQ06hZJkgbIWq9bsbQEe/YcvqnzgQPNMlgXVpKmlD3Krdi793BIrjh0qFkvSZpKBuVWHDy4tfWSpIlnUG7F3NzW1kuSJp5BuRX79sHOnUeu27mzWS9JmkoG5VYsLMDiIuzaBUnzuLjoQB5JmmKOet2qhQWDUZJmiD1KSZI6MCjXsJ6AJGk1T72uYj0BSdJa9ihXsZ6AJGktg3IV6wlIktYyKFexnoAkaS2DchXrCUiS1jIoV7GegCRpLUe9rmE9AUnSavYoJUnqwKCUJKkDg1KSpA4MSkmSOjAoJUnqwKDsN6uqS9JUcXpIP1lVXZKmjj3KfrKquiRNHYOyn6yqLklTx6DsJ6uqS9LUMSj7yarqkjR1DMp+sqq6JE0dR732m1XVJWmq2KOUJKkDg1KSpA5GEpRJTkxycZKr2scT1tnmwUkuT3JFko8leeoo2ipJmm2j6lE+F7i0qk4BLm2X1zoEPL2qHgA8AfjDJMcPr4mSJI0uKM8EzmufnwectXaDqvpUVV3VPv8C8CXgTsNqoCRJMLqgPKmqrm2ffxE4qdPGSR4GHAN8ZoPX9yRZTrJ8/fXX97elkqSZNrDpIUkuAe68zktHFD6tqkpSHfZzMvA64Oyqumm9bapqEVgEmJ+f33BfkiRt1cCCsqpO3+i1JNclObmqrm2D8EsbbHd74F3A3qr64ICaKknShkZ16vVC4Oz2+dnABWs3SHIM8DbgtVV1/hDbJknSzUYVlC8BzkhyFXB6u0yS+SSvarf5WeBHgHOSfKT9efBIWitJmlmpmq5LevPz87W8vDzqZkiSxkiS/VU138t7rcwjSVIHBqUkSR0YlJIkdWBQSpLUgUEpSVIHBuWoLC3B7t2wY0fzuLQ06hZJktYxsMo86mBpCfbsgUOHmuUDB5plgIWF0bVLknQL9ihHYe/ewyG54tChZr0kaawYlKNw8ODW1kuSRsagHIW5ua2tlySNjEE5Cvv2wc6dR67bubNZL0kaKwblKCwswOIi7NoFSfO4uOhAHkkaQ456HZWFBYNRkiaAPUpJkjowKCVJ6sCglCSpA4NSkqQODEpJkjowKCVJ6sCglCSpA4NSkqQODEpJkjowKCVJ6sCgHFdLS7B7N+zY0TwuLY26RZI0k6z1Oo6WlmDPnsM3dz5woFkG68NK0pDZoxxHe/ceDskVhw416yVJQ2VQbsPAzo4ePLi19ZKkgTEoe7RydvTAAag6fHa0L2E5N7e19ZKkgTEoezTQs6P79sHOnUeu27mzWS9JGiqDskcDPTu6sACLi7BrFyTN4+KiA3kkaQQc9dqjubnmdOt66/tiYcFglKQxYI+yR54dlaTZYFD2aOhnRy1AIEkj4anXbRja2VELEEjSyNijnAQWIJCkkTEoJ4EFCCRpZAzKSWABAkkaGYNyEjjEVpJGxqCcBBYgkKSRcdTrpLAAgSSNhD1KSZI6MCglSerAoJQkqQODUpKkDgxKSZI6MCglSerAoJQkqQODUpKkDgxKSZI6MCglSerAoJwmS0uwezfs2NE8Li2NukWSNPGs9TotlpZgz57DN3g+cKBZBmvEStI22KOcFnv3Hg7JFYcONeslST0zKKfFwYNbWy9J6opBOS3m
5ra2XpLUFYNyWuzbBzt3Hrlu585mvSSpZwbltFhYgMVF2LULkuZxcdGBPJK0TY56nSYLCwajJPWZPUpJkjowKKedRQgkaVs89TrNLEIgSdtmj3KaWYRAkrbNoJxmFiGQpG0zKKeZRQgkadsMymlmEQJJ2jaDcppZhECSts1Rr9POIgSStC0j6VEmOTHJxUmuah9P6LDt7ZNck+QVw2yjJEkwulOvzwUurapTgEvb5Y28CPjAUFolSdIaowrKM4Hz2ufnAWett1GS04CTgPcOp1mSJB1pVEF5UlVd2z7/Ik0YHiHJDuDlwLM321mSPUmWkyxff/31/W2pJGmmDWwwT5JLgDuv89IRZWGqqpLUOts9E3h3VV2TpONnVdUisAgwPz+/3r4kSerJwIKyqk7f6LUk1yU5uaquTXIy8KV1Nnsk8JgkzwSOA45JckNVdbqeKUlSX41qesiFwNnAS9rHC9ZuUFU3z2lIcg4wb0hKkoZtVNcoXwKckeQq4PR2mSTzSV41ojZJknQLqZquS3rz8/O1vLw86mZIksZIkv1VNd/Ley1hJ0lSBwalJEkdGJQ6bGkJdu+GHTuax6WlUbdIkkbOouhqLC3Bnj1w6FCzfOBAswwWVZc00+xRqrF37+GQXHHoULNekmaYQanGwYNbWy9JM8KgVGNubmvrJWlGbBiU7X0gX5zkdUn+05rXXjn4pmmo9u2DnTuPXLdzZ7NekmZYpx7la4AAbwF+LslbkhzbvvaIgbdMw7WwAIuLsGsXJM3j4qIDeSTNvE6jXu9VVT/dPn97kr3AZUmeNIR2aRQWFgxGSVqjU1Aem2RHVd0EUFX7knwe+ADN3TwkSZp6nU69vgN43OoVVXUu8GvAjQNsk8aNhQgkzbANe5RV9ZwN1r8HOGVgLdJ4sRCBpBnn9BB1ZiECSTPOoFRnFiKQNOMMSnVmIQJJM27ToEyyM8n/TvJn7fIpSX5y8E3TWLAQgaQZ102P8jXAvwOPbJc/D/z2wFrUJw7U7BMLEUiacd3cZuteVfXUJE8DqKpDSTLgdm2LAzX7zEIEkmZYNz3KG5PcBiiAJPei6WGOLQdqSpL6pZugfAHwHuDuSZaAS4F151iOCwdqDonntyXNgE1PvVbVe5PspymEHuC/V9WXB96ybZiba063rrdefeL5bUkzoptRr+8AHg+8r6reOe4hCQ7UHArPb0uaEd2cev094DHAlUnOT/KUJLcecLu2xYGaQ+D5bUkzIlXV3YbJrWiKpD8DeEJV3X6QDevV/Px8LS8vj7oZ02/37vXPb+/aBVdfPezWSFJHSfZX1Xwv7+2qMk876vWngV8BHgqc18uHaYp4flvSjOjmGuWbgE/Q9CZfQTOv8lmDbpjGnOe3Jc2IbgoOvBp4WlV9b9CN0YSxEIGkGbBhUCZ5XFVdBtwWOHNtMZ6qeuuA2yZJ0sh1OvX62Pbxiev8WBRdnVmMQNKU2LBHWVXPb5++sKr+ZfVrSe4x0FZpslmMQNIU6WbU61vWWXd+vxuiKWIxAklTpNM1yvsBDwC+L8lPrXrp9sBYFxzQiFmMQNIU6TTq9b401yKPp7kuueKbNEUHpPVZbFfSFOl0jfIC4IIkj6yqy4fYJk26ffuOvEYJFiOQNLG6uUb5K0mOX1lIckKSPx9ckzTxLEYgaYp0U3DgQVX19ZWFqvpakh8aXJM0FSxGIGlKdNOj3JHkhJWFJCfSXcBKkjTxugnKlwOXJ3lRkhcBfwe8dLDN0lSzGIGkCbJpz7CqXptkmaYoOsBPVdWVg22WppbFCCRNmK5uswWcCHyrql4BXG9lHvXMYgSSJkw3t9l6PvAbwG+2q44GXj/IRmmKWYxA0oTppkf5ZOBJwLcAquoLwO0G2ShNsY2KDliMQNKY6iYob6yqAgogyW0H2yRNtX37muIDq1mMQNIY6yYo35TkT4HjkzwDuAT4s8E2S1PLYgSSJkyazuImGyVnAI8HAlxUVRcPumG9mp+fr+Xl5VE3
Q5I0RpLsr6r5Xt7bVeGANhjHNhwlSRqUDU+9Jvmb9vGbSb6xzs+/JHnm8JoqSdLwdbp7yA+3j+uOcE1yB5oqPa8cTNMkSRq9rgoOJHlIkv+W5FkrBdGr6ivAjw6ycZohlrWTNKa6KTjwW8B5wB2AOwLnJnkeQFVdO9jmaSaslLU7cACqDpe1MywljYFNR70m+STwg1X17Xb5NsBHquq+Q2jfljnqdQLt3t2E41q7dsHVVw+7NZKm0HZGvXZz6vULwK1XLR8LfL6XD5PWZVk7SWNsw8E8Sf4vTTWefwWuSHJxu3wG8A/DaZ5mwtzc+j1Ky9pJGgOd5lGunL/cD7xt1fr3Daw1mk379h156y2wrJ2ksbHhqdeqOq+qzgPeSBOW+4E3rlov9Ue3Ze0cGStpBDqdej0K+B3gF4EDNOXr7p7kNcDeqvrOcJqombCw0Lneqzd8ljQinQbzvIzmhs33qKrTquohwL2A44HfG0LbpMO84bOkEekUlD8JPKOqvrmyoqq+AfwX4CcG3TDpCI6MlTQinYKyap1JllX1Pdp7U0pD4w2fJY1Ip6C8MsnT165M8vPAPw+uSdI6vOGzpBHpND3kvwJvTfKLNCNeAeaB2wBPHnTDpCOsDNjZu7c53To314SkA3kkDVg3JeweBzygXbyyqi4deKu2wRJ2kqS1Bnrj5qq6DLisl51LkjTpurrNljT2LEYgaUA27VFKY89iBJIGyB6lJp/FCCQNkEGpyWcxAkkDNJKgTHJikouTXNU+nrDBdnNJ3pvkE0muTLJ7yE3VJLAYgaQBGlWP8rnApVV1CnBpu7ye1wIvq6r7Aw8DvjSk9mmSWIxA0gCNKijPBFZu1XUecNbaDZKcChxVVRcDVNUNVXVo7XZS17fpkqQebFpwYCAfmny9qo5vnwf42sryqm3OAn4ZuBG4B3AJ8Ny21uza/e0B9gDMzc2dduDAgUE2X5I0YbZTcGBgPcoklyT5p3V+zly9XVt4fb20Pgp4DPBs4KHAPYFz1vusqlqsqvmqmr/Tne7U319E08F5lpJ6NLB5lFV1+kavJbkuyclVdW2Sk1n/2uM1wEeq6rPte94OPAJ49SDaqynmPEtJ2zCqa5QXAme3z88GLlhnmw8BxydZ6SI+DrhyCG3TtHGepaRtGFVQvgQ4I8lVwOntMknmk7wKbr7v5bOBS5N8HAjwZyNqryaZ8ywlbcNIBvMM0rHHztd3vrPsXZh02O7dzenWtXbtgquvHnZrJI3AWA7mGZUbb4Sqw5ehHLMh51lK2o6pC8rVvAwlwHmWkrZl6k69JvMFy6uW4aabRtggSdLIeeq1A8t9qivOs5S0gam+H6WXodQV51lK6mDqTr066lVb5qhYaept59Tr1PUoH/hAWF7efDvpZs6zlNTB1F+jlDbl/SwldWBQSs6zlNSBQSk5z1JSB1N3jVLqycKCwShpXfYopW44z1KaWfYopc04z1KaafYopc14P0tpphmU0macZynNNINS2ozzLKWZZlBKm3GepTTTDEppM86zlGaaQSl1Y2GhKZB+003N43oh6RQSaSo5PUTqB6eQSFPLHqXUD04hkaaWQSn1g1NIpKllUEr94BQSaWoZlFI/OIVEmloGpdQPTiGRppajXqV+8VZd0lSyRykNg3MspYllj1IaNOdYShPNHqU0aM6xlCaaQSkNmnMspYlmUEqD5hxLaaIZlNKgOcdSmmgGpTRozrGUJppBKQ1DN7fpAqeRSGPIoNwGv9PUVyvTSA4cgKrD00j8hyWNlEHZI7/T1HdOI5HGkkHZI7/T1HdOI5HGkkHZI7/T1HdOI5HGkkHZI7/T1HdOI5HGkkHZI7/T1HfdTiNxFJk0VBZF79HKd9fevc3p1rm5JiSdGqdt2exWXRZYl4YuVTXqNvTV/Px8LS8vj7oZ0mDs3t2E41q7djXzMyWtK8n+qprv5b2eepUmiaPIpKEzKKVJ4igyaegMSmmSOIpMGjqDUpokFliXhs6g
lCZNNwXWnUIi9Y3TQ6Rp4xQSqa/sUUrTxkLEUl8ZlNK0cQqJ1FcGpTRtnEIi9ZVBKU0bp5BIfWVQStNmK1NIHB0rbcpRr9I02qy4Ojg6VuqSPUppVjk6VuqKQSnNKkfHSl0xKKVZ5ehYqSsGpTSrHB0rdcWglGaVBdalrjjqVZpl3YyOlWacPUpJkjowKCVJ6sCglCSpA4NSkqQODEpJ22fNWE0xR71K2h5rxmrK2aOUtD3WjNWUG0lQJjkxycVJrmofT9hgu5cmuSLJJ5L8cZIMu62SNmHNWE25UfUonwtcWlWnAJe2y0dI8ijg0cCDgB8AHgo8dpiNlNQFa8Zqyo0qKM8Ezmufnwectc42BdwaOAY4FjgauG4YjZO0BVupGeugH02gUQXlSVV1bfv8i8BJazeoqsuBvwKubX8uqqpPrLezJHuSLCdZvv766wfVZknr6bZm7MqgnwMHoOrwoB/DUmMuVTWYHSeXAHde56W9wHlVdfyqbb9WVUdcp0xyb+CPgKe2qy4GnlNVf93pc+fn52t5eXk7TZc0CLt3N+G41q5dcPXVw26NZkyS/VU138t7BzY9pKpO3+i1JNclObmqrk1yMvCldTZ7MvDBqrqhfc9fAo8EOgalpDHloB9NqFGder0QOLt9fjZwwTrbHAQem+SoJEfTDORZ99SrpAngoB9NqFEF5UuAM5JcBZzeLpNkPsmr2m3OBz4DfBz4KPDRqnrHKBorqQ+8UbQm1Egq81TVV4AfW2f9MvDL7fPvAf95yE2TNCgrg3v27m1Ot87NNSFp9R6NOSvzSBqehYVm4M5NNzWPCwtOGdHYs9arpNGxTqwmgD1KSaNjnVhNAINS0ug4ZUQTwKCUNDrbmTLitU0NiUEpaXR6nTJiOTwNkUEpaXS6rRO7ltc2NUQDq/U6KtZ6lWbAjh1NT3KtpJl6Iq2xnVqv9iglTR7L4WmIDEpJk8dyeBoig1LS5On12qbUAyvzSJpMCwsGo4bCHqUkSR0YlJJk8QJ14KlXSbPNwuzahD1KSbPN4gXahEEpabZZmF2bMCglzTaLF2gTBqWk2WbxAm3CoJQ02yxeoE046lWSLF6gDuxRSlK/OB9zKtmjlKR+cD7m1LJHKUn94HzMqWVQSlI/OB9zahmUktQPzsecWgalJPWD8zGnlkEpSf3gfMypZVBKUr8sLMDVV8NNNzWPWw1Jp5eMJaeHSNI4cHrJ2LJHKUnjwOklY8uglKRx4PSSsWVQStI4cHrJ2DIoJWkcOL1kbBmUkjQO+jm9xNGzfeWoV0kaF/243ZejZ/vOHqUkTRNHz/adQSlJ08TRs31nUErSNHH0bN8ZlJI0TRw923cGpSRNE4uz952jXiVp2vRj9KxuZo9SktSdGZ2faY9SkrS5GZ6faY9SkrS5GZ6faVBKkjY3w/MzDUpJ0uZmeH6mQSlJ2twMz880KCVJm+v3/MwJGkHrqFdJUnf6NT9zwkbQ2qOUJA3XhI2gNSglScM1YSNoDUpJ0nBN2Ahag1KSNFwTNoLWoJQkDVcvI2hHOEo2VTW0DxuGY4+dr+98Z5m5ueY/J2M4gEqStBVrR8lC0wPdwvSUJPurar6Xj5+6oEzmC5aBLR9HSdI42r27mUKy1q5dcPXVXe3CoFxldVDClo6jJGkc7dgB62VVAjfd1NUuthOUU3+NckxHG0uSujXiUbJTH5RjOtpYktStXkfJrhoA9CB4YK8fP9VBOcajjSVJ3ep1lOyePc21zSqOhmN6/fipu0bpqFdJ0toBQPPAclV62dXUFUV/4ANheXnz7SRJU6yPA1Sm+tSrJGlG9XGAikEpSZo+6w0A6pFBKUmaPmsGAH0Hbux1V1MXlB//+ETcMFsaexN0A3ppfQsLTcWZm27iY/DxXnczkqBM8jNJrkhyU5INKyUkeUKSTyb5dJLndrPvG29sCjis3DDbP+6NTcMX4bj+DuParm6tGVnv35NmW1UN/Qe4P3Bf4H3A/Abb3Ar4DHBP
mvkvHwVO3Xzfp1Xzp9387NpVWsfrX1+1c2cdcax27mzWT4px/R3GtV1bsWvXke3370mTDliuHjNrpPMok7wPeHZV3WJCR5JHAi+oqv/QLv8mQFW9uPM+j6z1uoVSgDOlDzWGR25cf4dxbddW9KG0pjRWtlPrdZznUd4V+Nyq5WuAh6+3YZI9wJ5m6ViaqaWNqu/cmHys53PT0+u002657noOHLgTyf79w29PL9b7HZqQGvLvcEfgy4cXx6Zd2/CgB8LRt6hk0ue/pzXHTV3yuPXmvr2+cWBBmeQS4M7rvLS3qi7o52dV1SKw2H7uctVyT/9rmHXNsTvgsdui5rj19j/VWeZx643HrTdJei5FM7CgrKrTt7mLzwN3X7V8t3adJElDM87TQz4EnJLkHkmOAX4OuHDEbZIkzZhRTQ95cpJrgEcC70pyUbv+LkneDVBV3wV+FbgI+ATwpqq6oovdLw6o2bPAY9cbj1tvPG698bj1pufjNnV3D5EkqZ/G+dSrJEkjZ1BKktTBxAflIMvhTbskJya5OMlV7eMJG2z30vYYfyLJHyfp6ean02ILx20uyXvb43Zlkt1DbupY6fa4tdvePsk1SV4xzDaOo26OW5IHJ7m8/Tv9WJKnjqKt42Cz7/okxyZ5Y/v633fzdznxQQn8E/BTwAc22iDJrYA/AX4cOBV4WpJTh9O8sfZc4NKqOgW4tF0+QpJHAY8GHgT8APBQ4LHDbOQY2vS4tV4LvKyq7g88DPjSkNo3rro9bgAvosPf9Izp5rgdAp5eVQ8AngD8YZLjh9fE8dDld/0vAV+rqnsDfwD87mb7nfigrKpPVNUnN9nsYcCnq+qzVXUj8AbgzMG3buydCZzXPj8POGudbQq4NU293WOBo4HrhtG4MbbpcWv/OI+qqosBquqGqjo0tBaOp27+vZHkNOAk4L3DadbY2/S4VdWnquqq9vkXaP5TdqdhNXCMdPNdv/p4ng/82GZnySY+KLu0Xjm8u46oLePkpKq6tn3+RZovpyNU1eXAXwHXtj8XVdUnhtfEsbTpcQPuA3w9yVuTfDjJy9r/7c6yTY9bkh3Ay4FnD7NhY66bf283S/Iwmv/YfmbQDRtD3XzX37xNOw3xX4E7dNrpONd6vdkwy+FNm07HbvVCVVWSW8wVSnJvmru93K1ddXGSx1TVX/e9sWNku8eN5m/rMcAPAQeBNwLnAK/ub0vHSx+O2zOBd1fVNbN0KbwPx21lPycDrwPOrirL1/fJRASl5fB61+nYJbkuyclVdW37B7beNbQnAx+sqhva9/wlTaGIqQ7KPhy3a4CPVNVn2/e8HXgEUx6UfThujwQek+SZwHHAMUluqKqpHoDXh+NGktsD76LpQHxwQE0dd918169sc02So4DvA77SaaezcurVcnjruxA4u31+NrBe7/wg8NgkRyU5mmYgz6yfeu3muH0IOD7JynWixwFXDqFt42zT41ZVC1U1V1W7aU6/vnbaQ7ILmx639nvtbTTH6/whtm3cdPNdv/p4PgW4rDarvNPrjSzH5Yemx3MN8O80g0wuatffheYUzsp2PwF8iua8/d5Rt3scfmjOy18KXAVcApzYrp8HXtU+vxXwpzTheCXw+6Nu96h/ujlu7fIZwMeAjwPnAseMuu2TcNxWbX8O8IpRt3vUP13+nf488B3gI6t+Hjzqto/oeN3iux54IfCk9vmtgTcDnwb+AbjnZvu0hJ0kSR3MyqlXSZJ6YlBKktSBQSlJUgcGpSRJHRiUkiR1YFBKQ5TkbkkuaO8E8Zkkf9TO9yLJORvdLSPJ3/X4eWetLgqd5IVJtlvAo+u79kjTwKCUhqQtvPxW4O3V3AniPjTVZ/Zt9t6qelSPH3sWzV0UVvbzW1V1SY/7Wm3Tu/ZI08KglIbnccC3q+o1AFX1PeB/AL+YZGe7zd2TvK/tcT5/5Y1Jblj1/NeTfKi97+D/WbX+6e26jyZ5XXuLtCcBL0vykST3SnJukqe09+x7
86r3/miSd7bPH9/e2/Afk7w5yXFrf5Hq7q490lSYiFqv0pR4ALB/9Yqq+kaSg8C921UPo7nv5yHgQ0neVVXLK9sneTxwSrtdgAuT/AhNrcrnAY+qqi8nObGqvprkQuCd1ZY1W1Vo/BJgMcltq+pbwFOBNyS5Y7uf06vqW0l+A/ifNJVNpJlkUErj5eKq+gpAkrcCPwwsr3r98e3Ph9vl42iC8weBN1fVlwGq6qudPqSqvpvkPcATk5wP/EfgOTS1fE8F/rYN1WOAy/vzq0mTyaCUhudKmiLMN2vv+DBHU3fyITQ3yl5t7XKAF1fVn67Zz7N6aM8bgF8FvgosV9U32+uoF1fV03rYnzSVvEYpDc+lwM4kTwdob+T8cuDcqjrUbnNGkhOT3IZmIM7frtnHRTTXNI9r93HXJN8PXAb8TJI7tOtPbLf/JnC7DdrzfppwfgZNaAJ8EHh0ex9Sktw2yX228TtLE8+glIakmjsQPJkm0K6iucPBt4H/tWqzfwDeQnPXkbesuj5Z7T7eC/wFcHmSjwPnA7erqitoRs++P8lHgd9v3/cG4NeTfDjJvda053vAO4Efbx+pqutp7trx/5J8jOa06/3W/i5JnpzkGpr7R74ryUU9HxhpzHn3EGnMtb3Ef6yqXaNuizSL7FFKYyzJXWh6db836rZIs8oepSRJHdijlCSpA4NSkqQODEpJkjowKCVJ6sCglCSpg/8PSC/HP/JR9eEAAAAASUVORK5CYII=",
+ "text/plain": [
+ "
If num_search_each_probe (described below) is set to 2 or more, action will be input as an array of action IDs.
+
Thus, define the simulator to return a list of evaluation values for each action ID.
+
+
The definitions in the basic tutorial and simulator are the same, but keep in mind that t is a numpy.array, and when action is an array, self.t[action] will also be an array.
With res.export_sequence_best_fx(), you can get the best value obtained at each step and the history of the action.
+
The difference between res.export_all_sequence_best_fx() and res.export_sequence_best_fx() is that the latter's information is not for each evaluation of the simulator, but for each search step. In this case, the total number of steps is 10, and the number of evaluations is 100.
"
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "best_fx, best_action = res.export_all_sequence_best_fx()\n",
+ "plt.plot(best_fx)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "With `res.export_sequence_best_fx()`, you can get the best value obtained at each step and the history of the action. \n",
+ "\n",
+ "The difference between `res.export_all_sequence_best_fx()` and `res.export_sequence_best_fx()` is that the latter's information is not for each evaluation of the simulator, but for each search step. In this case, the total number of steps is 10, and the number of evaluations is 100."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2020-12-04T06:20:24.535770Z",
+ "start_time": "2020-12-04T06:20:24.418352Z"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[]"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYcAAAD3CAYAAAD2S5gLAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/Il7ecAAAACXBIWXMAAAsTAAALEwEAmpwYAAAdSklEQVR4nO3dfZBV9Z3n8fcHmgclICE2DyqiBjQKEpGbSFKDEJJAxo2aYZKyyjhkqkJwMvsQluyktuIuNVXj1uxGnThumGxIJVvBnR3GmGyJyyLBiXSoKMTGiLcJ8SHMoN0KaVAuLbY89Xf/6HPx0t2X23374dy+9/OqulXn/s7vnPM7F+p++pzvPecoIjAzMys0Iu0BmJlZ5XE4mJlZNw4HMzPrxuFgZmbdOBzMzKyburQHMBAuvvjiuOKKK9IehpnZsLJ79+7DEVHf07yqCIcrrriCxsbGtIdhZjasSDpQbJ5PK5mZWTcOBzMz66ascJC0RtJOSWtK9HtI0g5Jawva7pLUIGmTpAuKra+32zAzs4HX53CQNB2YERELgCWSZhbpNw8gIhYCcyRNlzQWuBX4BLA8Itp7Wl9vt2FmZoOjnCOHpcBGSfOBycDCIv1mAXuS6aeAucBNQDuwFcgfTfS0vt5uw8zMBkHJcJC0QtKT+RcwFThJ55f7KmBKkUX3AYuS6QXABGAaMA5YBlwp6QY6v/y7rq+ntq7jWiWpUVJja2tr7/bWzMx6peRPWSNiA7Ah/17SSmAdsJLOL/rDRZbLStovaStwBMgBI4GGiOiQ1ABcA7QWWd95txER64H1AJlMxreWNTMbQOVc57AN+ExENEn6bxQERw/ujYjTkv4e2AWMAb4CfAeYDTxMZ3B0Xd/bfdiGmQ2yN4+f5B9+9SonTp1JeyjWxdVTx/PZuZcM+Hr7HA4RcSD5FdEu4PGI2AsgaSLwlxGxOnk/Hng0KUJ/PyKOJO3PSnoaeC4inkvaelpftzYzG3ovHWrjyz96ltfebEdKezTW1WfnXjIo4aBqeNhPJpMJXyFtNvB+/ttD/Lt/eJ4LRo/k+ysy3DB9YtpDsgEkaXdEZHqaVxW3zzCzgRURfH/Hfv56y2+ZfckEvr8iw7SLLkh7WDaEHA5mdo4Tp8/wn/5PEz/e3cy/un4a93/hw1wwemTaw7Ih5nAws7MOv32CP3t4N40H3uJrn5zF1z45ixEjXGioRQ4HMwNg3xvHWPmjRo4cP8F37pw3KEVOGz4cDmbGz/YeZPU/Ps/4sXX8+O6Pc/1lF6U9JEuZw8GshkUE3234HfdtfZG5l17E+hUZpkwYm/awrAI4HMxq1LunzvDNn2b56a9buPXDl3Df5+cydpQLz9bJ4WBWg37f9i53P7ybX796lK9/+mr+zZKZyFe4WQGHg1mN2ft6jq/8qJG33jnFd794I394/bS0h2QVyOFgVkOeaHqDf/+Pe3j/haP48Z99jDmXuvBsPXM4mNWAiOA7P3+FB7a9xLzLJ/K9P5nP5PEuPFtxDgezKvfuqTN849EX2LTndf5o3qX89fLrXXi2khwOZlXs0LF3WbWhkRdacnzjM9fw1UUfdOHZesXhYFalss05vrKhkWPvnuJ7d81n6eypaQ/JhhGHg1kV2vzCG3z9x8/zgXFj+MlXP8610yakPSQbZhwOZlUkIvjbf3qZB598mcyM9/M//mQ+F79vTNrDsmHI4WBWJdpPnuE/PLqHzS+8wefnX8Z/+aM5jKlz4dnKM6KchSStSR7juaZEv4ck7ZC0tqDtLkkNkjZJuqCgX4OkHxb0Oyhpe/KaX844zWrFG7l2vvC9p/l/2Te455Zrue/zcx0M1i99DgdJ04EZEbEAWCJpZpF+8wAiYiEwR9L05HnStwKfAJZHRLukccCjEbEIOCRpYbKKJyJicfLaXca+mdWE5187yu3f+SX/cvgdfvClDF+5+Sr/Isn6rZwjh6XAxuSv+cnAwiL9ZgF7kumngLnA
TUA7sBVYCxARxyPiF0m/N4ETyfTNyVHHA/L/dLMePfZ8C3d87xnGjBrBT//84yz50JS0h2RVomQ4SFoh6cn8C5gKnKTzy30VUOx/4z5gUTK9AJgATAPGAcuAKyXdULCdWcCNEfGrpGl1ctRRB9zSw7hWSWqU1Nja2lp6T82qSEdH8MDPXuRrG5/nw9Mn8ti//gOunjI+7WFZFSkZDhGxISI+lX8Bh4B1wD3AGOBwkeWywH5JW4FRQA44DjRERAfQAFwDIOkS4FvA3QXLb0omNwOze1j/+ojIRESmvr6+t/trNuy9c/I0f/73z/Hff/4Kd2Sm87++fBOTxo1Oe1hWZco5rbQNaI6IJmA58Mx5+t4bEcuAAHYBu+k8LQWdX/gvJ9MPAHdHxDEASZMkLUnmZYBXyhinWdV5/Wg7n//uM/zsNwf5z5+9jv/6x9czuq6s35WYnVef/1dFxAFgp6RdQFtE7AWQNFHSg/l+ksYDmyU1AFsi4khEvA48K+lpYFREPCfpo8Bi4JHkl0m3AceAO5NlrwUe699umg1/uw+8xW3f+SWvvfkOP/zTj/DlP7jShWcbNIqItMfQb5lMJhobG9Mehtmg+elzzfzHn2SZNnEsP/hShpmTXV+w/pO0OyIyPc3zRXB21sHcuzy57xDD/8+F6vLSwTYe3nmAj131Af7uizfyftcXbAg4HOysv9n2Io80Nqc9DOvBF2+6nL+8bTajRrq+YEPD4WBnvdCc4w9mXsy377gh7aFYgVEjxcQLfbRgQ8vhYEDnA2Fe/v3bfPq6KdSP943azGqdj1ENgN+8cYwzHeFnCpsZ4HCwRFNLDoDrHQ5mhsPBEtnmHB8YN5ppF/mh82bmcLBEtiXHnEsv8kVVZgY4HIz3itE+pWRmeQ4HY5+L0WbWhcPB3itGX+ZwMLNODgcj25Jj0rjRXOJitJklHA5GtuWYi9Fmdg6HQ41799QZXj7UxlzXG8ysgMOhxv32YBunXYw2sy4cDjUu62K0mfXA4VDjss1HXYw2s27KCgdJayTtlLSmRL+HJO2QtLag7S5JDZI2SbogaTuYPCJ0u6T5fdmG9Y+L0WbWkz6Hg6TpwIyIWAAskTSzSL95ABGxEJgjabqkscCtwCeA5RHRnnR/IiIWJ6/dvd2G9U++GH39pRPSHoqZVZhyjhyWAhuTv/AnAwuL9JsF7EmmnwLmAjcB7cBWYG1B35uTI4wH1PknbG+3Yf2QL0b7thlm1lXJcJC0QtKT+RcwFThJ55f7KmBKkUX3AYuS6QXABGAaMA5YBlwp6YZk/urkCKMOuIXOQDjvNiStktQoqbG1tbU3+2pd5IvR/qWSmXVVMhwiYkNEfCr/Ag4B64B7gDHA4SLLZYH9krYCo4AccBxoiIgOoAG4Jum7KVlsMzAbaC21jYhYHxGZiMjU19f3YZctr6k5x/svHMWlEy9IeyhmVmHKOa20DWiOiCZgOfDMefreGxHLgAB2AbvpPGUEnSHwsqRJkpYkbRnglT5uw8rk23SbWTF9DoeIOADslLQLaIuIvQCSJkp6MN9P0nhgs6QGYEtEHImI14FnJT0NjIqI54BjwJ1Jv2uBx4ptwwbOu6fO8NKhNtcbzKxHioi0x9BvmUwmGhsb0x7GsLLntaPcvu6XfPeLN/KH109LezhmlgJJuyMi09M8XwRXo1yMNrPzcTjUqKaWHBMvHMVl73cx2sy6czjUqGxLjutdjDazIhwONejE6c5itE8pmVkxDoca9OLBNk6d8ZXRZlacw6EGnb1Nt8PBzIpwONSgppYcF13gYrSZFedwqEEuRptZKQ6HGnPi9BlePNjmJ7+Z2Xk5HGqMi9Fm1hsOhxrjYrSZ9YbDoca4GG1mveFwqDEuRptZbzgcaki+GO0ro82sFIdDDXnp4NsuRptZrzgcaoiL0WbWWw6HGpJNitHTJ7kYbWbnV1Y4SFojaaekNSX6PSRph6S1BW13SWqQtEnSBZIul7Q9eb0m6XNJv4MF
7fPLGaedq6klx5xLJ7gYbWYl9TkcJE0HZkTEAmCJpJlF+s0DiIiFwBxJ0yWNBW4FPgEsj4j2iHg1IhZHxGKgCdiarOKJfHtE7O77rlmhk6c7XIw2s14r58hhKbAx+Wt+MrCwSL9ZwJ5k+ilgLnAT0E5nAKwt7CzpKqAlItqTppuTo44H5D91++2lQ22cPNPheoOZ9UrJcJC0QtKT+RcwFThJ55f7KmBKkUX3AYuS6QXABGAaMA5YBlwp6YaC/ncAGwver06OOuqAW3oY1ypJjZIaW1tbS+1GzXMx2sz6omQ4RMSGiPhU/gUcAtYB9wBjgMNFlssC+yVtBUYBOeA40BARHUADcE3BIkuB7QXLb0omNwOze1j/+ojIRESmvr6+5I7WumxLjglj67h80oVpD8XMhoFyTittA5ojoglYDjxznr73RsQyIIBdwG46QwA6v/BfhrOnlA5GxOnk/SRJS5J+GeCVMsZpBTqL0b4y2sx6p8/hEBEHgJ2SdgFtEbEXQNJESQ/m+0kaD2yW1ABsiYgjEfE68Kykp4FREfFc0v124PGCzRwD7kyWvRZ4rIx9s8TJ0x389o02n1Iys16rK2ehiLgfuL9L21FgdcH7NjprC12X/Svgr7q0fbvL+9PAynLGZt3li9H+pZKZ9ZYvgqsBTS5Gm1kfORxqQLYlx/ixdcz4gIvRZtY7DocakG3JMecSF6PNrPccDlXubDHaz4w2sz5wOFQ5XxltZuVwOFQ5F6PNrBwOhyrnYrSZlcPhUOWaXIw2szI4HKrYqTMd7DvoYrSZ9Z3DoYq9dKiNk6d9ZbSZ9Z3DoYq5GG1m5XI4VLFsS47xY+qY4dt0m1kfORyqWLblGLMvncCIES5Gm1nfOByq1KkzHex745hPKZlZWRwOVerlQ2+7GG1mZXM4VCkXo82sPxwOVSrbkuN9Y+q44gPj0h6KmQ1DDocqlW3JMfsSF6PNrDxlhYOkNZJ2SlpTot9DknZIWpu8v1zS9uT1mqTPFVtfb7dh3Z12MdrM+qnP4SBpOjAjIhYASyTNLNJvHkBELATmSJoeEa9GxOKIWAw0AVt7Wl9vt2E9e/n3b3PidIdvm2FmZSvnyGEpsFHSfGAysLBIv1nAnmT6KWBufoakq4CWiGgvsr6S25C0SlKjpMbW1tYydqN6ZZNitH+pZGblKhkOklZIejL/AqYCJ4G1wCpgSpFF9wGLkukFwISCeXcAG5PpyT2sr6e2c0TE+ojIRESmvr6+1G7UlGxzZzH6ShejzaxMdaU6RMQGYEP+vaSVwDpgJTAOOFxkuayk/ZK2AkeAXMHspcB9yXRrkfWV3Ib1LNuS4zoXo82sH8o5rbQNaI6IJmA58Mx5+t4bEcuAAHbB2VNKByPi9HnW15dtWAEXo81sIPQ5HCLiALBT0i6gLSL2AkiaKOnBfD9J44HNkhqALRFxJJl1O/D4+dZXbBtWWr4YPdfFaDPrB0VE2mPot0wmE42NjWkPoyI80vga33j0Bf7p64v4YP370h6OmVUwSbsjItPTPF8EV2WaWlyMNrP+czhUGRejzWwgOByqiIvRZjZQHA5V5JXWt3n3VIfDwcz6zeFQRbLNvjLazAaGw6GKNLXkGDd6JFdd7GK0mfWPw6GKdN6m+yIXo82s3xwOVeL0mQ5+88Yxn1IyswHhcKgSv2s93lmMvmxC6c5mZiU4HKpE1s+MNrMB5HCoEk0tOS4cPZIrL/YtM8ys/xwOVSL/zOiRLkab2QBwOFSB02c6+M3rLkab2cBxOFSB37Uep/3UGdcbzGzAOByqgIvRZjbQHA5VIF+MvsrPbzCzAeJwqALZlhzXTXMx2swGTlnhIGmNpJ2S1pTo95CkHZLWJu8vl7Q9eb0m6XMF/Rok/bBg2YMFfeeXM85acKYjXIw2swHX53CQNB2YERELgCWSZhbpNw8gIhYCcyRNj4hXI2JxRCwGmoCtksYBj0bEIuCQpIXJKp7I942I3WXsW034XevbtJ86
42dGm9mAKufIYSmwMflrfjKwsEi/WcCeZPopYG5+hqSrgJaIaI+I4xHxi2TWm8CJZPrm5KjjAUndzpdIWiWpUVJja2trGbtRHfK36XYx2swGUslwkLRC0pP5FzAVOAmsBVYBU4osug9YlEwvAApv+nMHsLHLdmYBN0bEr5Km1clRRx1wS9eVR8T6iMhERKa+vr7UblStrIvRZjYISoZDRGyIiE/lX8AhYB1wDzAGOFxkuSywX9JWYBSQK5i9FNiefyPpEuBbwN0Fy29KJjcDs/uwTzWlycVoMxsE5ZxW2gY0R0QTsBx45jx9742IZUAAu+DsKaWDEXG6oN8DwN0RcSzpM0nSkmReBniljHFWvTMdwV4Xo81sEPQ5HCLiALBT0i6gLSL2AkiaKOnBfD9J44HNkhqALRFxJJl1O/B4Qb+PAouBR5JfJt0GHAPuTJa9FnisnJ2rdvuTYrTrDWY20OrKWSgi7gfu79J2FFhd8L4NWNbDst/u8v5XwLQeNrOynLHVkrNXRvuXSmY2wHwR3DCWbclxwaiRfNDFaDMbYA6HYaypJcd1vk23mQ0Ch8MwlS9Gu95gZoPB4TBM/fPht3nn5Bn/UsnMBoXDYZjybbrNbDA5HIapbPMxxo4awQfrx6U9FDOrQg6HYSp/ZXTdSP8TmtnA8zfLMHSmI2h6PedTSmY2aBwOw5CL0WY22BwOw5CvjDazweZwGIbyxeiZvjLazAaJw2EYamrJca2L0WY2iPztMsx0dAR7XYw2s0HmcBhm9h8+zvGTvk23mQ0uh8Mw0+RitJkNAYfDMJNtybkYbWaDzuEwzGRdjDazIVDWN4ykNZJ2SlpTot9DknZIWpu8vzx5FOh2Sa9J+lzSfrCgfX5ftlFLOjqC3/g23WY2BPocDpKmAzMiYgGwRNLMIv3mAUTEQmCOpOkR8WpELI6IxUATsDXp/kS+PSJ293Ybteafjxzn7ROnfWW0mQ26co4clgIbk7/wJwMLi/SbBexJpp8C5uZnSLoKaImI9qTp5uQI4wFJ6s02JK2S1CipsbW1tYzdGH6afJtuMxsiJcNB0gpJT+ZfwFTgJLAWWAVMKbLoPmBRMr0AmFAw7w5gY8H71ckRRh1wC52BcN5tRMT6iMhERKa+vr7UblSFbHOOMXUjmDXZxWgzG1x1pTpExAZgQ/69pJXAOmAlMA44XGS5rKT9krYCR4BcweylwH0FfTclk5uBG4DW3myj1rgYbWZDpZxvmW1Ac0Q0AcuBZ87T996IWAYEsAvOnlI6GBGnk/eTJC1J+meAV/q4jZrQ4WdGm9kQ6nM4RMQBYKekXUBbROwFkDRR0oP5fpLGA5slNQBbIuJIMut24PGCVR4D7kz6XQs8VmwbtexfkmK0w8HMhkLJ00o9iYj7gfu7tB0FVhe8bwOW9bDst7u8P03n6aOS26hl+dt0+5dKZjYUfPJ6mMg25xhdN4JZU1yMNrPB53AYJvLF6FEuRpvZEPA3zTDwXjF6QunOZmYDwOEwDLgYbWZDzeEwDLgYbWZDzeEwDDS1dBajr54yPu2hmFmNcDgMA9mWHNdOHe9itJkNGX/bVLiOjmBvyzGfUjKzIeVwqHAH3nyHthOnmevHgprZEHI4VDgXo80sDQ6HCuditJmlweFQ4bLNLkab2dDzN04FiwiaXs/5lJKZDTmHQwU7cOQd2t71ldFmNvQcDhXMxWgzS4vDoYI1teQYPdLFaDMbeg6HCpZtyfGhaeMZXed/JjMbWmV960haI2mnpDUl+j0kaYektcn7yyVtT16vSfpcT21J34MF7fPLGedwFhFkW1yMNrN09PkxoZKmAzMiYoGk/ytpU0S80kO/eQARsVDSI5KmR8SrwOJk/hZga0S0d21LVvFERPxpGftUFVyMNrM0lXPksBTYmPw1PxlYWKTfLGBPMv0UMDc/Q9JVQEsSDMXabk6OOh6QpK4rl7RKUqOkxtbW1jJ2o7Lli9EOBzNLQ8lwkLRC0pP5FzAV
OAmsBVYBU4osug9YlEwvAAofY3YHsLFL/65tqyNiIZ1HN7d0XXlErI+ITERk6uvrS+3GsONitJmlqWQ4RMSGiPhU/gUcAtYB9wBjgMNFlssC+yVtBUYBuYLZS4HtXRY5py0iNiWTm4HZvdmZapJtyXHNVBejzSwd5XzzbAOaI6IJWA48c56+90bEMiCAXXD29NHBiDid79S1TdIkSUuS2RmgW02jmkUETS5Gm1mK+hwOEXEA2ClpF9AWEXsBJE2U9GC+n6TxwGZJDcCWiDiSzLodeLzLaru2HQPuTJa9Fnisr+Mczl598x2OuRhtZinq86+VACLifuD+Lm1HgdUF79uAZT0s++1SbckRxMpyxlYNXIw2s7T5hHYFyrbkGDVSXD31fWkPxcxqlMOhAjUlxegxdSPTHoqZ1SiHQ4XpLEYf8yklM0uVw6HCvPZmO7n2U1x/6cS0h2JmNczhUGFcjDazSuBwqDAuRptZJXA4VBgXo82sEjgcKkj+Nt0+pWRmaXM4VJDmtzqL0b5thpmlzeFQQVyMNrNK4XCoIPli9DVTfZtuM0uXw6GCNLXkuHqKi9Fmlj6HQ4WICF5odjHazCqDw6FCuBhtZpXE4VAhXIw2s0ricKgQ2ZYcdSNcjDazyuBwqBD5YvTYUS5Gm1n6ygoHSWsk7ZS0pkS/hyTtkLS2oO2bkn4h6SeS6pK2OyTtknRfQb9ubdXKV0abWaXpczhImg7MiIgFwBJJM4v0mwcQEQuBOZKmSxoNLIiIm4GXgIykC4FbI+Im4H2SlvTUVt7uDQ/Nb7Vz9J1TzLnM4WBmlaGcZ0gvBTZKmg9MBhYCr/TQbxawJ5l+CpgbEZsljZZ0A3A18CLwMWCzpMuBDwGLAPXQ9vPClUtaBawCuPzyy8vYDfjtwWP82//967KWHUjvnDwDuBhtZpWjZDhIWgGsKGh6CngeWEvnl/Nniiy6D/gL4AfAAuBo0r4D+Bbwu6RtMpAD/gb4EvDNIm3niIj1wHqATCYTpfajJ2PrRjJrSmXcGvuT105mziUT0h6GmRnQi3CIiA3Ahvx7SSuBdcBKYBxwuMhyWUn7JW0FjgA5SR8BJkTEUkl/AXwBaAXuA74BnEjW11PbgLvi4nH83RfnD8aqzcyGtXIK0tuA5ohoApYDz5yn770RsQwIYBcwFTidzDsCTEqWfwt4Mlnf00XazMxsiPQ5HCLiALBT0i6gLSL2AkiaKOnBfD9J4+msGzQAWyLiCLAFuCJpuw14OCKOA98HngWuS/p2a+vHPpqZWR8poqzT9RUlk8lEY2Nj2sMwMxtWJO2OiExP83wRnJmZdeNwMDOzbhwOZmbWjcPBzMy6cTiYmVk3VfFrJUmtwIF+rOJiBulCu2HIn8W5/Hm8x5/Fuarh85gREfU9zaiKcOgvSY3Ffs5Va/xZnMufx3v8WZyr2j8Pn1YyM7NuHA5mZtaNw6HT+rQHUEH8WZzLn8d7/Fmcq6o/D9cczMysGx85mJlZNw4HMzPrpubDQdIaSTslrUl7LGmT9JCkBkk/THsslULShyX5lvGApLuS/x+bJF2Q9njSJOmbkn4h6SeSynnccsWr6XCQNJ3Oi0AWAEskzUx7TGmRNA54NCIWAYckLUx7TGmTNAL4MjAq7bGkTdJY4FbgE8DyiGhPeUipkTQaWBARNwMvAVV5rUNNhwOwFNgoaT6dz62u2S/EiDgeEb9I3r5J5+NZa91XgP+Z9iAqxE1AO7CVzufH16yIOAmMlnQDcDXwYrojGhy1Hg6TgZN0/mdfBUxJdzjpkzQLuDEifpX2WNIkaSpwaUT8Ou2xVIhpdD4zfhlwZfLFWMt2AN8Cfg8cTXcog6PWw6EVWAfcA4xh+N8npV8kXULnf/i70x5LBfhj4NOStgPzJX015fGk7TjQEBEdQANwTcrjSY2kjwATImIpsB/4QspDGhS1Hg7bgOaIaAKWA8+kPJ60PQDcHRHH0h5I2iJiXUR8LCIWA7sj4rtpjyll
u+k8DQswG3g5xbGkbSpwOpk+AkxKcSyDpqbDISIOADsl7QLaImJv2mNKi6SPAouBRyRtl3RbykOyChIRrwPPSnoaGBURz6U9phRtAa6Q1ADcBjyc8ngGha+QNjOzbmr6yMHMzHrmcDAzs24cDmZm1o3DwczMunE4mJlZNw4HMzPrxuFgZmbd/H8njNt/3qLGsgAAAABJRU5ErkJggg==\n",
+ "text/plain": [
+ "
Perform external calculations on the obtained candidate points, and register the actions and their scores in a file. The process of reading the file again, running the Bayesian optimization, and obtaining the next candidate point is repeated to advance the Bayesian optimization.
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+# -*- coding:utf-8 -*-
+importnumpyasnp
+
+
+
+[ドキュメント]
+classfourier:
+"""
+ random feature maps
+ ``Psi(X; W,b) = cos[X * Wt + b] * alpha``
+ where
+
+ - X: input, N-by-d matrix
+ - W: weight, l-by-d matrix
+ - Wt: transpose of W
+ - b: bias, 1-by-l matrix
+ - alpha: coefficient
+
+ and
+
+ - N: number of data
+ - d: dimension of input
+ - l: number of basis
+
+ Attributes
+ ==========
+ params: Tuple
+ W, b, alpha
+ nbasis: int
+ number of basis
+
+ References
+ ==========
+ A. Rahimi and B. Recht, "Random features for large-scale kernel machines,"
+ in "Advances in neural information processing systems," 2007, pp. 1177-1184.
+ """
+
+ def__init__(self,params):
+"""
+ Parameters
+ ----------
+ params: Tuple
+ W, b, alpha
+ """
+ self._check_params(params)
+ self._check_len_params(params)
+ self.params=params
+ self.nbasis=self.params[1].shape[0]
+
+
+
+
+ def_check_params(self,params):
+"""
+ Parameters
+ ==========
+ params: tuple
+ W, b, alpha
+
+ Raises
+ ======
+ ValueError
+ if ``params`` is not a 3-dimensional tuple
+ """
+ ifnotisinstance(params,tuple):
+ raiseValueError("The variable < params > must be a tuple.")
+
+ iflen(params)!=3:
+ raiseValueError("The variable < params > must be 3-dimensional tuple.")
+
+ def_check_len_params(self,params):
+"""
+ Parameters
+ ==========
+ params: tuple
+ W, b, alpha
+
+
+ Raises
+ ======
+ ValueError
+ when dim of W and b are mismatch
+ or alpha is not a scalar
+ """
+ ifparams[0].shape[0]!=params[1].shape[0]:
+ raiseValueError(
+ "The length of 0-axis of W must be same as the length of b."
+ )
+
+ ifhasattr(params[2],"__len__"):
+ iflen(params[2])!=1:
+ raiseValueError("The third entry of <params> must be a scalar.")
+ else:
+ ifisinstance(params[2],str):
+ raiseValueError("The third entry of <params> must be a scalar.")
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+from..importinf
+
+
+
+[ドキュメント]
class model:
    """
    Bayesian linear model.

    Attributes
    ==========
    prior: physbo.blm.prior.gauss
        prior distribution of weights
    lik: physbo.blm.lik.gauss
        kernel
    nbasis: int
        number of features in random feature map
    stats: Tuple
        auxiliary parameters for sampling
    method: str
        sampling method
    """

    def __init__(self, lik, prior, options=None):
        """
        Parameters
        ==========
        lik: physbo.blm.lik.gauss
            likelihood of the model
        prior: physbo.blm.prior.gauss
            prior distribution of the weights
        options: dict
            extra options forwarded to ``_set_options``
            (default: empty dict)
        """
        # Fixed: mutable default argument ``options={}`` replaced by a None
        # sentinel so a shared dict is never reused across instances.
        if options is None:
            options = {}
        self.prior = prior
        self.lik = lik
        self.nbasis = self.lik.linear.basis.nbasis
        self._init_prior(prior)
        self._set_options(options)
        self.stats = ()

    def prepare(self, X, t, Psi=None):
        """
        initializes model by using the first training dataset

        Parameters
        ==========
        X: numpy.ndarray
            inputs
        t: numpy.ndarray
            target (label)
        Psi: numpy.ndarray
            feature maps

        See also
        ========
        physbo.blm.inf.exact.prepare
        """
        if self.method == "exact":
            inf.exact.prepare(blm=self, X=X, t=t, Psi=Psi)
        else:
            # NOTE(review): unknown methods are silently ignored — confirm intended
            pass

    def update_stats(self, x, t, psi=None):
        """
        updates model by using another training data

        Parameters
        ==========
        x: numpy.ndarray
            input
        t: float
            target (label)
        psi: numpy.ndarray
            feature map

        See also
        ========
        physbo.blm.inf.exact.update_stats
        """
        if self.method == "exact":
            self.stats = inf.exact.update_stats(self, x, t, psi)
        else:
            # NOTE(review): unknown methods are silently ignored — confirm intended
            pass

    def get_post_params_mean(self):
        """
        calculates posterior mean of weights

        Returns
        =======
        numpy.ndarray

        See also
        ========
        physbo.blm.inf.exact.get_post_params_mean
        """
        if self.method == "exact":
            self.lik.linear.params = inf.exact.get_post_params_mean(blm=self)
        # Fixed: the documented return value was missing — the method fell off
        # the end and returned None despite the "Returns: numpy.ndarray" doc.
        return self.lik.linear.params
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+importscipy
+
+importphysbo.miscasmisc
+
+
+
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+
+
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+
+
+[ドキュメント]
class linear:
    """
    Linear model on top of a feature basis: ``f(X) = Psi(X) @ params + bias``.

    Attributes
    ==========
    basis:
        basis for random feature map
    nbasis: int
        number of basis
    bias:
        additive offset applied to the model output
    params:
        weight vector (zeros of size ``nbasis`` when not supplied)
    _init_params:
        the ``params`` value passed at construction time (may be None)
    """

    def __init__(self, basis, params=None, bias=None):
        self.basis = basis
        self.nbasis = basis.nbasis
        self._init_params = params
        self.bias = bias
        # default weights are an all-zero vector of the basis size
        self.params = np.zeros(self.nbasis) if params is None else params
        self.nparams = self.nbasis

    def get_mean(self, X, Psi=None, params=None, bias=None):
        """
        calculate mean values

        Parameters
        ==========
        X: numpy.ndarray
            input as an N-by-d matrix
        Psi: numpy.ndarray
            feature maps ``Psi(X)`` as an N-by-l matrix
            (default: self.get_basis(X))
        params: numpy.ndarray
            weight as a vector with size l
            (default: self.params)
        bias: float
            (default: self.bias)

        Returns
        =======
        numpy.ndarray
            Psi * params + bias
        """
        # fall back to stored state (copied, so callers cannot mutate it)
        weights = np.copy(self.params) if params is None else params
        offset = np.copy(self.bias) if bias is None else bias
        features = self.get_basis(X) if Psi is None else Psi
        return features.dot(weights) + offset
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importphysbo.predictor
+
+
+
+[ドキュメント]
    def fit(self, training, num_basis=None):
        """
        fit model to training dataset

        Parameters
        ==========
        training: physbo.variable
            dataset for training
        num_basis: int
            the number of basis (default: self.config.predict.num_basis)
        """
        if num_basis is None:
            num_basis = self.config.predict.num_basis

        # lazily infer the input dimension from the training data on first fit
        if self.model.prior.cov.num_dim is None:
            self.model.prior.cov.num_dim = training.X.shape[1]
        self.model.fit(training.X, training.t, self.config)
        # export a Bayesian linear model from the fitted GP and reset any
        # cached statistics so they are rebuilt from the new model
        self.blm = self.model.export_blm(num_basis)
        self.delete_stats()
+
+
+
+[ドキュメント]
    def prepare(self, training):
        """
        initializes model by using training data set

        Parameters
        ==========
        training: physbo.variable
            dataset for training (X: inputs, t: targets, Z: feature maps)
        """
        self.blm.prepare(training.X, training.t, training.Z)
+[ドキュメント]
    def get_post_fmean(self, training, test):
        """
        calculates posterior mean value of model

        Parameters
        ==========
        training: physbo.variable
            training dataset. If already trained, the model does not use this.
        test: physbo.variable
            inputs

        Returns
        =======
        numpy.ndarray
        """
        # prepare lazily: only if no statistics have been computed yet
        if self.blm.stats is None:
            self.prepare(training)
        return self.blm.get_post_fmean(test.X, test.Z)
+
+
+
+[ドキュメント]
    def get_post_fcov(self, training, test):
        """
        calculates posterior variance-covariance matrix of model

        Parameters
        ==========
        training: physbo.variable
            training dataset. If already trained, the model does not use this.
        test: physbo.variable
            inputs

        Returns
        =======
        numpy.ndarray
        """
        # prepare lazily: only if no statistics have been computed yet
        if self.blm.stats is None:
            self.prepare(training)
        return self.blm.get_post_fcov(test.X, test.Z)
+
+
+
+[ドキュメント]
    def get_post_params(self, training, test):
        """
        calculates posterior weights

        Parameters
        ==========
        training: physbo.variable
            training dataset. If already trained, the model does not use this.
        test: physbo.variable
            inputs (not used)

        Returns
        =======
        numpy.ndarray
        """
        # prepare lazily: only if no statistics have been computed yet
        if self.blm.stats is None:
            self.prepare(training)
        return self.blm.get_post_params_mean()
+
+
+
+[ドキュメント]
    def get_post_samples(self, training, test, N=1, alpha=1.0):
        """
        draws samples of mean values of model

        Parameters
        ==========
        training: physbo.variable
            training dataset. If already trained, the model does not use this.
        test: physbo.variable
            inputs
        N: int
            number of samples
            (default: 1)
        alpha: float
            noise for sampling source
            (default: 1.0)

        Returns
        =======
        numpy.ndarray
        """
        # prepare lazily: only if no statistics have been computed yet
        if self.blm.stats is None:
            self.prepare(training)
        return self.blm.post_sampling(test.X, Psi=test.Z, N=N, alpha=alpha)
+
+
+
+[ドキュメント]
    def get_predict_samples(self, training, test, N=1):
        """
        draws samples of values of model

        Parameters
        ==========
        training: physbo.variable
            training dataset. If already trained, the model does not use this.
        test: physbo.variable
            inputs
        N: int
            number of samples
            (default: 1)

        Returns
        =======
        numpy.ndarray (N x len(test))
        """
        # prepare lazily: only if no statistics have been computed yet
        if self.blm.stats is None:
            self.prepare(training)
        # transpose so samples are along axis 0 (N x len(test))
        return self.blm.predict_sampling(test.X, Psi=test.Z, N=N).transpose()
+
+
+
+[ドキュメント]
    def update(self, training, test):
        """
        updates the model.

        If not yet initialized (prepared), the model will be prepared by ``training``.
        Otherwise, the model will be updated by ``test``.

        Parameters
        ==========
        training: physbo.variable
            training dataset for initialization (preparation).
            If already prepared, the model ignore this.
        test: physbo.variable
            training data for update.
            If not prepared, the model ignore this.
        """
        # NOTE(review): this guards on self.model.stats while the sibling
        # getters guard on self.blm.stats — confirm the asymmetry is intended.
        if self.model.stats is None:
            self.prepare(training)
            return None

        # test.t may be a scalar (single observation) or a sequence
        if hasattr(test.t, "__len__"):
            N = len(test.t)
        else:
            N = 1

        if N == 1:
            # single observation: X/Z may be a 1-D vector or a 1-row matrix
            if test.Z is None:
                if test.X.ndim == 1:
                    self.blm.update_stats(test.X, test.t)
                else:
                    self.blm.update_stats(test.X[0, :], test.t)
            else:
                if test.Z.ndim == 1:
                    self.blm.update_stats(test.X, test.t, psi=test.Z)
                else:
                    self.blm.update_stats(test.X[0, :], test.t, psi=test.Z[0, :])
        else:
            # multiple observations: feed them to the model one row at a time
            for n in range(N):
                if test.Z is None:
                    self.blm.update_stats(test.X[n, :], test.t[n])
                else:
                    self.blm.update_stats(test.X[n, :], test.t[n], psi=test.Z[n, :])
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+
+
+[ドキュメント]
class cov_const:
    """
    isotropic variance-covariance

    All elements have the same variance and are independent with each other

    Attributes
    ==========
    params: float
        half of log of covariance
    sigma2: float
        covariance
    prec: float
        precision (= inv. of covariance)
    """

    def __init__(self, params=None):
        """
        Parameters
        ==========
        params: float
            half of log of covariance
            (default: numpy.log(1))
        """
        if params is None:
            params = np.log(1)
        # Bug fix: self.params was only assigned on the default (None) path,
        # so constructing with an explicit params left the attribute unset and
        # broke later get_cov()/get_prec() calls that default to self.params.
        self.params = params
        # _trans_params is defined elsewhere in this class; per the attribute
        # docs it maps params -> (covariance, precision)
        self.sigma2, self.prec = self._trans_params(params)

    def get_cov(self, nbasis, params=None):
        """
        computes the covariance

        Parameters
        ==========
        nbasis: int
            the number of components
        params: float
            half of log of variance
            (default: self.params)

        Returns
        =======
        numpy.ndarray
            nbasis-by-n-basis covariance matrix
        """
        if params is None:
            params = self.params
        sigma2, prec = self._trans_params(params)
        return np.identity(nbasis) * sigma2

    def get_prec(self, nbasis, params=None):
        """
        computes the precision

        Parameters
        ==========
        nbasis: int
            the number of components
        params: float
            half of log of variance
            (default: self.params)

        Returns
        =======
        numpy.ndarray
            nbasis-by-n-basis precision matrix
        """
        if params is None:
            params = self.params
        sigma2, prec = self._trans_params(params)
        return np.identity(nbasis) * prec
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+# coding=utf-8
+importnumpyasnp
+importscipy.optimize
+
+
+
def run(self, X, t):
    """
    Perform hyperparameter optimization using the L-BFGS-B algorithm.

    Parameters
    ----------
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
    t: numpy.ndarray
        N-dimensional vector that represents the corresponding negative energy of search candidates.

    Returns
    -------
    numpy.ndarray
        The solution of the optimization.
    """
    batch_size = self.config.learning.batch_size
    sub_X, sub_t = self.gp.sub_sampling(X, t, batch_size)

    # A random initial-parameter search is requested whenever the
    # configured number of trials is non-zero.
    if self.config.learning.num_init_params_search != 0:
        params = self.init_params_search(sub_X, sub_t)
    else:
        params = np.copy(self.gp.params)

    return self.one_run(params, sub_X, sub_t)
+
+
+
def one_run(self, params, X, t, max_iter=None):
    """
    Run a single L-BFGS-B minimization of the marginal likelihood.

    Parameters
    ----------
    params: numpy.ndarray
        Initial guess for optimization.
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
    t: numpy.ndarray
        N-dimensional vector that represents the corresponding negative energy of search candidates.
    max_iter: int
        Maximum number of iterations to perform (default: the configured value).

    Returns
    -------
    numpy.ndarray
        The solution of the optimization.
    """
    # When the caller does not fix max_iter, the configured iteration
    # budget and verbosity flag are used; otherwise messages are printed.
    if max_iter is None:
        is_disp = self.config.learning.is_disp
        max_iter = int(self.config.learning.max_iter)
    else:
        is_disp = True

    res = scipy.optimize.minimize(
        fun=self.gp.eval_marlik,
        args=(X, t),
        x0=params,
        method="L-BFGS-B",
        jac=self.gp.get_grad_marlik,
        bounds=self.gp.get_params_bound(),
        options={"disp": is_disp, "maxiter": max_iter},
    )
    return res.x
+
+
+
def init_params_search(self, X, t):
    """
    Search over random initial parameters and keep the best result.

    Parameters
    ----------
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
    t: numpy.ndarray
        N-dimensional vector that represents the corresponding negative energy of search candidates.

    Returns
    -------
    numpy.ndarray
        The parameters which give the minimum marginal likelihood.
    """
    n_search = self.config.learning.num_init_params_search
    max_iter = int(self.config.learning.max_iter_init_params_search)
    best_params = np.zeros(self.gp.num_params)
    best_marlik = np.inf

    for _ in range(n_search):
        candidate = self.gp.get_cand_params(X, t)
        candidate = self.one_run(candidate, X, t, max_iter)
        marlik = self.gp.eval_marlik(candidate, X, t)
        if marlik < best_marlik:
            best_marlik = marlik
            best_params = candidate

    return best_params
+
+
+
+
+
class online(object):
    """Base class for online (stochastic) hyperparameter learning."""

    def __init__(self, gp, config):
        """
        Parameters
        ----------
        gp : model (gp.core.model)
        config: set_config (misc.set_config)
        """
        self.gp = gp
        self.config = config
        # Total number of mini-batch updates performed so far.
        self.num_iter = 0
+
+
def run(self, X, t):
    """
    Run the initial parameter search and the hyperparameter learning.

    Parameters
    ----------
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
    t: numpy.ndarray
        N-dimensional vector that represents the corresponding negative energy of search candidates.

    Returns
    -------
    numpy.ndarray
        The solution of the optimization.
    """
    is_init_params_search = self.config.learning.num_init_params_search != 0
    is_disp = self.config.learning.is_disp

    if is_init_params_search:
        if is_disp:
            print("Start the initial hyper parameter searching ...")
        params = self.init_params_search(X, t)
        if is_disp:
            print("Done\n")
    else:
        # Bug fix: the original read ``self.params``, but this class never
        # defines that attribute (``__init__`` sets only gp/config/num_iter),
        # so this branch raised AttributeError.  The sibling batch learner
        # copies the model's parameters here as well.
        params = np.copy(self.gp.params)

    if is_disp:
        print("Start the hyper parameter learning ...")
    params = self.one_run(params, X, t)
    if is_disp:
        print("Done\n")

    return params
+
+
+
def one_run(self, params, X, t, max_epoch=None, is_disp=False):
    """
    Run stochastic mini-batch updates of the parameters for several epochs.

    Parameters
    ----------
    params: numpy.ndarray
        Parameters for optimization.
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
    t: numpy.ndarray
        N-dimensional vector that represents the corresponding negative energy of search candidates.
    max_epoch: int
        Maximum number of epochs (default: the configured value, which also
        activates the configured verbosity flag).
    is_disp: bool
        If True, periodically print the marginal likelihood.

    Returns
    -------
    numpy.ndarray
        The solution of the optimization.
    """
    num_data = X.shape[0]
    batch_size = min(self.config.learning.batch_size, num_data)

    if max_epoch is None:
        max_epoch = self.config.learning.max_epoch
        is_disp = self.config.learning.is_disp

    num_disp = self.config.learning.num_disp
    eval_X, eval_t = self.gp.sub_sampling(X, t, self.config.learning.eval_size)
    # Epochs at which the marginal likelihood is reported.
    # NOTE(review): the step is zero when max_epoch < num_disp, which makes
    # range() raise ValueError -- same as the original code; confirm intent.
    timing = range(0, max_epoch, int(np.floor(max_epoch / num_disp)))
    shown = 0

    for epoch in range(max_epoch):
        perm = np.random.permutation(num_data)

        if is_disp and shown < num_disp and epoch == timing[shown]:
            self.disp_marlik(params, eval_X, eval_t, epoch)
            shown += 1

        # Incomplete trailing mini-batches are skipped.
        for head in range(0, num_data, batch_size):
            index = perm[head:head + batch_size]
            if len(index) == batch_size:
                self.num_iter += 1
                params += self.get_one_update(params, X[index, :], t[index])

    if is_disp:
        self.disp_marlik(params, eval_X, eval_t, max_epoch)

    self.reset()
    return params
+
+
+
def disp_marlik(self, params, eval_X, eval_t, num_epoch=None):
    """
    Print the current marginal likelihood (optionally with the epoch index).

    Parameters
    ----------
    params: numpy.ndarray
        Parameters for optimization.
    eval_X: numpy.ndarray
        Feature matrix of the evaluation subset.
    eval_t: numpy.ndarray
        Target values of the evaluation subset.
    num_epoch: int
        Number of epochs (omitted from the output when None).
    """
    marlik = self.gp.eval_marlik(params, eval_X, eval_t)
    if num_epoch is not None:
        # Single call producing the same bytes as the original pair of prints.
        print(num_epoch, "-th epoch", end=" ")
    print("marginal likelihood", marlik)
+
+
+
def init_params_search(self, X, t):
    """
    Search over random initial parameters and keep the best result.

    Parameters
    ----------
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
    t: numpy.ndarray
        N-dimensional vector that represents the corresponding negative energy of search candidates.

    Returns
    -------
    numpy.ndarray
        The parameters which give the minimum marginal likelihood.
    """
    n_search = self.config.learning.num_init_params_search
    max_epoch = self.config.learning.max_epoch_init_params_search
    eval_X, eval_t = self.gp.sub_sampling(X, t, self.config.learning.eval_size)
    best_params = np.zeros(self.gp.num_params)
    best_marlik = np.inf

    for _ in range(n_search):
        candidate = self.gp.get_cand_params(X, t)
        candidate = self.one_run(candidate, X, t, max_epoch)
        marlik = self.gp.eval_marlik(candidate, eval_X, eval_t)
        if marlik < best_marlik:
            best_marlik = marlik
            best_params = candidate

    return best_params
def get_one_update(self, params, X, t):
    """
    Compute one Adam update step for the parameters.

    Parameters
    ----------
    params: numpy.ndarray
        Parameters for optimization.
    X: numpy.ndarray
        Mini-batch feature matrix.
    t: numpy.ndarray
        Mini-batch target values.

    Returns
    -------
    numpy.ndarray
        Additive parameter update.
    """
    grad = self.gp.get_grad_marlik(params, X, t)

    # Exponential moving averages of the gradient and its square.
    self.m = self.beta * self.m + (1 - self.beta) * grad
    self.v = self.gamma * self.v + (1 - self.gamma) * grad ** 2
    # Bias-corrected first and second moment estimates.
    hat_m = self.m / (1 - self.beta ** self.num_iter)
    hat_v = self.v / (1 - self.gamma ** self.num_iter)
    return -self.alpha * hat_m / (np.sqrt(hat_v) + self.epsilon)
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+fromphysboimportblm
+fromphysbo.gpimportinf
+fromphysbo.gp.coreimportlearning
+fromphysbo.gp.core.priorimportprior
+
+
+
def sub_sampling(self, X, t, N):
    """
    Choose a random subset of at most N rows from (X, t).

    Parameters
    ----------
    X: numpy.ndarray
        Each row of X denotes the d-dimensional feature vector of a search candidate.
    t: numpy.ndarray
        The negative energy of each search candidate.
    N: int
        Total number of data in the subset (None means "use everything").

    Returns
    -------
    subX: numpy.ndarray
    subt: numpy.ndarray
    """
    num_data = X.shape[0]
    # No subsampling requested, or the dataset is already small enough.
    if N is None or N >= num_data:
        return X, t
    index = np.random.permutation(num_data)[:N]
    return X[index, :], t[index]
+
+
+
def export_blm(self, num_basis):
    """
    Export a blm (Bayesian linear model) predictor approximating this GP.

    Parameters
    ----------
    num_basis: int
        Total number of basis functions for the random feature expansion.

    Returns
    -------
    physbo.blm.core.model

    Raises
    ------
    ValueError
        If the prior covariance kernel does not support a random feature
        expansion (no ``rand_expans`` method).
    """
    if not hasattr(self.prior.cov, "rand_expans"):
        # Bug fix: the original raised the truncated sentence
        # "The kernel must be." -- state the actual requirement.
        raise ValueError(
            "The kernel must support random feature expansion (rand_expans)."
        )

    basis_params = self.prior.cov.rand_expans(num_basis)
    basis = blm.basis.fourier(basis_params)
    prior = blm.prior.gauss(num_basis)
    lik = blm.lik.gauss(
        blm.lik.linear(basis, bias=self.prior.get_mean(1)),
        blm.lik.cov(self.lik.params),
    )
    return blm.model(lik, prior)
+
+
+
def eval_marlik(self, params, X, t, N=None):
    """
    Evaluate the marginal likelihood.

    Parameters
    ----------
    params: numpy.ndarray
        Parameters.
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
    t: numpy.ndarray
        N dimensional array of negative energies.
    N: int
        Total number of subset data (if not specified, the full dataset is used).

    Returns
    -------
    marlik: float
        Marginal likelihood.

    Raises
    ------
    NotImplementedError
        If the inference method ``self.inf`` is not "exact".
    """
    subX, subt = self.sub_sampling(X, t, N)
    if self.inf == "exact":
        return inf.exact.eval_marlik(self, subX, subt, params=params)
    # Bug fix: the original fell through with ``pass`` and then hit a
    # NameError on the undefined result; fail with an explicit error.
    raise NotImplementedError(
        "inference method '%s' is not supported" % self.inf
    )
+
+
+
def get_grad_marlik(self, params, X, t, N=None):
    """
    Evaluate the gradient of the marginal likelihood.

    Parameters
    ----------
    params: numpy.ndarray
        Parameters.
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
    t: numpy.ndarray
        N dimensional array of negative energies.
    N: int
        Total number of subset data (if not specified, the full dataset is used).

    Returns
    -------
    grad_marlik: numpy.ndarray
        Gradient of the marginal likelihood.

    Raises
    ------
    NotImplementedError
        If the inference method ``self.inf`` is not "exact".
    """
    subX, subt = self.sub_sampling(X, t, N)
    if self.inf == "exact":
        return inf.exact.get_grad_marlik(self, subX, subt, params=params)
    # Bug fix: the original had no else branch and returned an undefined
    # name (NameError); fail with an explicit error instead.
    raise NotImplementedError(
        "inference method '%s' is not supported" % self.inf
    )
+
+
+
def get_params_bound(self):
    """
    Collect the box bounds of all hyperparameters.

    Returns
    -------
    bound: list
        (min, max) tuples ordered as: likelihood, prior mean,
        prior covariance parameters.
    """
    # Bug fix: start from an empty list so the method also works when the
    # likelihood has no parameters (the original left ``bound`` undefined
    # in that case and raised NameError below).
    bound = []
    if self.lik.num_params != 0:
        bound.extend(self.lik.get_params_bound())
    if self.prior.mean.num_params != 0:
        bound.extend(self.prior.mean.get_params_bound())
    if self.prior.cov.num_params != 0:
        bound.extend(self.prior.cov.get_params_bound())
    return bound
+
+
+
def prepare(self, X, t, params=None):
    """
    Compute and cache the sufficient statistics for posterior prediction.

    Parameters
    ----------
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
    t: numpy.ndarray
        N dimensional array of negative energies.
    params: numpy.ndarray
        Parameters (default: a copy of self.params).
    """
    if params is None:
        params = np.copy(self.params)
    # Only exact inference is implemented; other settings are a no-op.
    if self.inf == "exact":
        self.stats = inf.exact.prepare(self, X, t, params)
def print_params(self):
    """Print the current likelihood / prior hyperparameters to stdout."""
    print("\n")
    if self.lik.num_params != 0:
        print("likelihood parameter = ", self.lik.params)
    if self.prior.mean.num_params != 0:
        print("mean parameter in GP prior: ", self.prior.mean.params)
    # The covariance parameters are always reported.
    print("covariance parameter in GP prior: ", self.prior.cov.params)
    print("\n")
+
+
+
def get_cand_params(self, X, t):
    """
    Assemble a candidate hyperparameter vector from the components.

    Parameters
    ----------
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
    t: numpy.ndarray
        N dimensional array of negative energies.

    Returns
    -------
    params: numpy.ndarray
        Candidate parameters (likelihood, prior mean, prior covariance order).
    """
    params = np.zeros(self.num_params)

    if self.lik.num_params != 0:
        params[:self.lik.num_params] = self.lik.get_cand_params(t)
    head = self.lik.num_params

    if self.prior.mean.num_params != 0:
        params[head:head + self.prior.mean.num_params] = \
            self.prior.mean.get_cand_params(t)
    head += self.prior.mean.num_params

    if self.prior.cov.num_params != 0:
        params[head:] = self.prior.cov.get_cand_params(X, t)

    return params
+
+
+
def fit(self, X, t, config):
    """
    Fit the hyperparameters to the training data and store them.

    Parameters
    ----------
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
    t: numpy.ndarray
        N dimensional array of negative energies.
    config: physbo.misc.set_config object
    """
    method = config.learning.method

    if method == "adam":
        params = learning.adam(self, config).run(X, t)
    if method in ("bfgs", "batch"):
        params = learning.batch(self, config).run(X, t)

    # NOTE(review): an unrecognized method leaves ``params`` undefined and
    # raises NameError here -- mirrors the original behavior; confirm intent.
    self.set_params(params)
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+importscipy
+
+
+
def decomp_params(self, params):
    """
    Split a parameter vector into mean and covariance parts.

    Parameters
    ----------
    params: numpy.ndarray
        parameters (default: a copy of self.params)

    Returns
    -------
    mean_params: numpy.ndarray
    cov_params: numpy.ndarray
    """
    if params is None:
        params = np.copy(self.params)
    split = self.mean.num_params
    return params[:split], params[split:]
+
+
+
def get_mean(self, num_data, params=None):
    """
    Calculate the mean value of the prior.

    Parameters
    ----------
    num_data: int
        Total number of data
    params: numpy.ndarray
        Parameters (default: a copy of self.params)

    Returns
    -------
    float
    """
    if params is None:
        params = np.copy(self.params)
    # Only the leading slice belongs to the mean component.
    return self.mean.get_mean(num_data, params[:self.mean.num_params])
+
+
+
def get_cov(self, X, Z=None, params=None, diag=False):
    """
    Calculate the variance-covariance matrix of the prior.

    Parameters
    ----------
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
    Z: numpy.ndarray
        N x d dimensional matrix of test points.
    params: numpy.ndarray
        Parameters (default: a copy of self.params).
    diag: bool
        If True, only the diagonal is computed.

    Returns
    -------
    numpy.ndarray
    """
    if params is None:
        params = np.copy(self.params)
    # The covariance component owns the trailing parameter slice.
    cov_params = params[self.mean.num_params:]
    return self.cov.get_cov(X, Z, params=cov_params, diag=diag)
+
+
+
def get_grad_mean(self, num_data, params=None):
    """
    Calculate the gradient of the prior mean w.r.t. its parameters.

    Parameters
    ----------
    num_data: int
        Total number of data
    params: numpy.ndarray
        Parameters (default: a copy of self.params)

    Returns
    -------
    numpy.ndarray
    """
    if params is None:
        params = np.copy(self.params)
    mean_params, _ = self.decomp_params(params)
    return self.mean.get_grad(num_data, params=mean_params)
+
+
+
def get_grad_cov(self, X, params=None):
    """
    Calculate the gradient of the prior covariance w.r.t. its parameters.

    Parameters
    ----------
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
    params: numpy.ndarray
        Parameters (default: a copy of self.params).

    Returns
    -------
    numpy.ndarray
    """
    if params is None:
        params = np.copy(self.params)
    _, cov_params = self.decomp_params(params)
    return self.cov.get_grad(X, params=cov_params)
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+# -*- coding:utf-8 -*-
+importnumpyasnp
+fromscipyimportspatial
+from._src.enhance_gaussimportgrad_width64
+
+
+
+[ドキュメント]
+classgauss:
+"""gaussian kernel"""
+
def __init__(
    self,
    num_dim,
    width=3,
    scale=1,
    ard=False,
    max_width=1e6,
    min_width=1e-6,
    max_scale=1e6,
    min_scale=1e-6,
):
    """
    Initialize the Gaussian kernel.

    Parameters
    ----------
    num_dim: int
    width: float
    scale: float
    ard: bool
        flag to use Automatic Relevance Determination (ARD).
    max_width: float
        Maximum value of width
    min_width: float
        Minimum value of width
    max_scale: float
        Maximum value of scale
    min_scale: float
        Minimum value of scale
    """
    self.ard = ard
    self.num_dim = num_dim
    self.scale = scale
    # The bounds are stored in log space.
    self.max_ln_width = np.log(max_width)
    self.min_ln_width = np.log(min_width)
    self.max_ln_scale = np.log(max_scale)
    self.min_ln_scale = np.log(min_scale)

    if self.ard:
        # With ARD: one width per dimension plus the scale.
        self.num_params = num_dim + 1
        if isinstance(width, np.ndarray) and len(width) == self.num_dim:
            self.width = width
        else:
            self.width = width * np.ones(self.num_dim)
    else:
        # Without ARD: a shared width and the scale.
        self.width = width
        self.num_params = 2

    self.set_params(self.cat_params(self.width, self.scale))
+
+
def print_params(self):
    """Show the current kernel parameters on stdout."""
    print(" Parameters of Gaussian kernel \n ")
    print(" width = ", +self.width)
    print(" scale = ", +self.scale)
    print(" scale2 = ", +self.scale ** 2)
    print(" \n")
def supp_params(self, params):
    """
    Clip parameters into their allowed (log-space) ranges in place.

    Parameters
    ----------
    params: numpy.ndarray
        Parameters for optimization; all entries but the last are
        log-widths, the last entry is the log-scale.

    Returns
    -------
    params: numpy.ndarray
        The same array, clipped.
    """
    # Widths (all but the last entry) and scale (last entry) have
    # separate bounds.
    params[0:-1] = np.clip(params[0:-1], self.min_ln_width, self.max_ln_width)
    params[-1] = np.clip(params[-1], self.min_ln_scale, self.max_ln_scale)
    return params
+
+
+
def decomp_params(self, params):
    """
    Decompose log-space parameters into width and scale.

    Parameters
    ----------
    params: numpy.ndarray
        parameters

    Returns
    -------
    width: float
    scale: float
    """
    # All but the last entry are log-widths; the last entry is log-scale.
    return np.exp(params[0:-1]), np.exp(params[-1])
+
+
+
def save(self, file_name):
    """
    Save the Gaussian kernel to a .npz file.

    Parameters
    ----------
    file_name: str
        file name to save the information of the kernel
    """
    info = dict(
        name="gauss",
        params=self.params,
        ard=self.ard,
        num_dim=self.num_dim,
        max_ln_scale=self.max_ln_scale,
        min_ln_scale=self.min_ln_scale,
        max_ln_width=self.max_ln_width,
        min_ln_width=self.min_ln_width,
        num_params=self.num_params,
    )
    with open(file_name, "wb") as f:
        np.savez(f, **info)
+
+
+
def load(self, file_name):
    """
    Recover the Gaussian kernel state from a file written by ``save``.

    Parameters
    ----------
    file_name: str
        file name to load the information of the kernel
    """
    data = np.load(file_name)

    self.num_dim = data["num_dim"]
    self.ard = data["ard"]
    self.max_ln_scale = data["max_ln_scale"]
    self.min_ln_scale = data["min_ln_scale"]
    self.max_ln_width = data["max_ln_width"]
    self.min_ln_width = data["min_ln_width"]
    # set_params re-derives width/scale from the stored parameters.
    self.set_params(data["params"])
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+importscipy
+from...importmisc
+fromcopyimportdeepcopy
+
+
+
def eval_marlik(gp, X, t, params=None):
    """
    Evaluate the negative log marginal likelihood under exact inference.

    Parameters
    ----------
    gp: physbo.gp.core.model
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
    t: numpy.ndarray
        N dimensional array of negative energies.
    params: numpy.ndarray
        Parameters.

    Returns
    -------
    marlik: float
        Marginal likelihood.
    """
    ndata, _ = X.shape
    lik_params, prior_params = gp.decomp_params(params)

    fmu = gp.prior.get_mean(ndata, params=prior_params)
    K = gp.prior.get_cov(X, params=prior_params)
    noise = gp.lik.get_cov(ndata, params=lik_params)

    # Small jitter keeps the Cholesky factorization numerically stable.
    A = K + noise + 1e-8 * np.identity(ndata)
    res = t - fmu
    U = scipy.linalg.cholesky(A, check_finite=False)
    alpha = scipy.linalg.solve_triangular(
        U.transpose(), res, lower=True, overwrite_b=False, check_finite=False
    )
    return (
        0.5 * ndata * np.log(2 * np.pi)
        + np.sum(np.log(np.diag(U)))
        + 0.5 * np.inner(alpha, alpha)
    )
+
+
+
+
def get_grad_marlik(gp, X, t, params=None):
    """
    Gradient of the negative log marginal likelihood w.r.t. the
    hyperparameters (likelihood, prior mean, prior covariance order).

    Parameters
    ----------
    gp: physbo.gp.core.model
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
    t: numpy.ndarray
        N dimensional array of negative energies.
    params: numpy.ndarray
        Parameters.

    Returns
    -------
    grad_marlik: numpy.ndarray
        Gradient of the marginal likelihood.
    """
    ndata, _ = X.shape
    lik_params, prior_params = gp.decomp_params(params)

    fmu = gp.prior.get_mean(ndata, prior_params)
    K = gp.prior.get_cov(X, params=prior_params)
    noise = gp.lik.get_cov(ndata, lik_params)

    # Jitter for numerical stability of the factorization.
    A = K + noise + 1e-8 * np.identity(ndata)
    U = scipy.linalg.cholesky(A, check_finite=False)
    res = t - fmu
    alpha = misc.gauss_elim(U, res)
    invA = scipy.linalg.inv(A, check_finite=False)

    grad_marlik = np.zeros(gp.num_params)

    # --- likelihood parameters ---
    if gp.lik.num_params != 0:
        lik_grad = gp.lik.get_grad(ndata, lik_params)
        tmp = lik_grad.dot(alpha)
        grad_marlik[0:gp.lik.num_params] = (
            -0.5 * tmp.dot(alpha) + 0.5 * misc.traceAB2(invA, lik_grad)
        )

    head = gp.lik.num_params

    # --- prior mean parameters ---
    if gp.prior.mean.num_params != 0:
        mean_grad = gp.prior.get_grad_mean(ndata, prior_params)
        grad_marlik[head:head + gp.prior.mean.num_params] = -np.inner(
            alpha, mean_grad
        )
    head += gp.prior.mean.num_params

    # --- prior covariance parameters ---
    if gp.prior.cov.num_params != 0:
        cov_grad = gp.prior.get_grad_cov(X, prior_params)
        tmp = cov_grad.dot(alpha)
        grad_marlik[head:] = (
            -0.5 * tmp.dot(alpha) + 0.5 * misc.traceAB3(invA, cov_grad)
        )

    return grad_marlik
+
+
+
+
def prepare(gp, X, t, params=None):
    """
    Compute the Cholesky factor and weight vector used by the
    posterior-prediction routines.

    Parameters
    ----------
    gp: physbo.gp.core.model
    X: numpy.ndarray
        N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
    t: numpy.ndarray
        N dimensional array of negative energies.
    params: numpy.ndarray
        Parameters (default: a copy of gp.params).

    Returns
    -------
    stats: tuple
        (U, alpha): U is the upper Cholesky factor of the regularized
        covariance; alpha solves A @ alpha = t - fmu.
    """
    ndata = X.shape[0]
    if params is None:
        params = np.copy(gp.params)

    lik_params, prior_params = gp.decomp_params(params)

    K = gp.prior.get_cov(X, params=prior_params)
    fmu = gp.prior.get_mean(ndata, params=prior_params)
    noise = gp.lik.get_cov(ndata, params=lik_params)
    # Jitter for numerical stability of the factorization.
    A = K + noise + 1e-8 * np.identity(ndata)
    U = scipy.linalg.cholesky(A, check_finite=False)
    alpha = misc.gauss_elim(U, t - fmu)
    return (U, alpha)
+
+
+
+
def get_post_fmean(gp, X, Z, params=None):
    """
    Calculate the posterior mean at the test points Z.

    Parameters
    ----------
    gp: physbo.gp.core.model
    X: numpy.ndarray
        N x d dimensional matrix of training candidates.
    Z: numpy.ndarray
        N x d dimensional matrix of test points.
    params: numpy.ndarray
        Parameters.

    Returns
    -------
    numpy.ndarray
    """
    ntest = Z.shape[0]
    lik_params, prior_params = gp.decomp_params(params)

    # alpha was precomputed by prepare().
    alpha = gp.stats[1]
    fmu = gp.prior.get_mean(ntest)
    G = gp.prior.get_cov(X=Z, Z=X, params=prior_params)
    return G.dot(alpha) + fmu
+
+
+
+
def get_post_fcov(gp, X, Z, params=None, diag=True):
    """
    Calculate the posterior (co)variance at the test points Z.

    Parameters
    ----------
    gp: physbo.gp.core.model
    X: numpy.ndarray
        N x d dimensional matrix of training candidates.
    Z: numpy.ndarray
        N x d dimensional matrix of test points.
    params: numpy.ndarray
        Parameters.
    diag: bool
        If True, return only the diagonal (the variances).

    Returns
    -------
    numpy.ndarray
    """
    lik_params, prior_params = gp.decomp_params(params)

    # U and alpha were precomputed by prepare().
    U = gp.stats[0]
    alpha = gp.stats[1]

    G = gp.prior.get_cov(X=X, Z=Z, params=prior_params)
    invUG = scipy.linalg.solve_triangular(
        U.transpose(), G, lower=True, overwrite_b=False, check_finite=False
    )

    if diag:
        diagK = gp.prior.get_cov(X=Z, params=prior_params, diag=True)
        return diagK - misc.diagAB(invUG.transpose(), invUG)

    K = gp.prior.get_cov(X=Z, params=prior_params)
    return K - np.dot(invUG.transpose(), invUG)
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+
+
+[ドキュメント]
+classgauss:
+"""Gaussian likelihood function"""
+
def __init__(self, std=1, max_params=1e6, min_params=1e-6):
    """
    Initialize the Gaussian likelihood.

    Parameters
    ----------
    std: numpy.ndarray or float
        standard deviation.
    max_params: float
        The maximum value of the parameter; larger values are clipped.
    min_params: float
        The minimum value of the parameter; smaller values are clipped.
    """
    # Bounds and the parameter itself are stored in log space.
    self.min_params = np.log(min_params)
    self.max_params = np.log(max_params)
    self.num_params = 1
    self.std = std
    self.params = np.log(std)
    self.set_params(self.params)
+
+
def supp_params(self, params=None):
    """
    Clip the (log-space) parameter into [min_params, max_params].

    Parameters
    ----------
    params: numpy.ndarray
        Parameter to clip (default: a copy of self.params).

    Returns
    -------
    params: numpy.ndarray
    """
    if params is None:
        # Bug fix: the original copied the (None) argument itself
        # (``np.copy(params)``), which made the comparisons below fail;
        # default to the stored parameter instead.
        params = np.copy(self.params)

    if params > self.max_params:
        params = self.max_params
    if params < self.min_params:
        params = self.min_params

    return params
+
+
+
def trans_params(self, params=None):
    """
    Return exp(params), i.e. the standard deviation.

    Parameters
    ----------
    params: numpy.ndarray
        Log-space parameter (default: a copy of self.params).

    Returns
    -------
    std: numpy.ndarray
    """
    if params is None:
        params = np.copy(self.params)
    return np.exp(params)
+
+
+
def get_params_bound(self):
    """
    Get the boundary list for the parameters.

    Returns
    -------
    bound: list
        A num_params-dimensional list of (min_params, max_params) tuples.
    """
    return [(self.min_params, self.max_params)] * self.num_params
+
+
+
def get_cov(self, num_data, params=None):
    """
    Get the (diagonal) noise covariance matrix: exp(2*params) * I.

    Parameters
    ----------
    num_data: int
    params: numpy.ndarray
        Log-space parameter (default: self.params).

    Returns
    -------
    numpy.ndarray
        num_data x num_data diagonal matrix.
    """
    var = self.trans_params(params) ** 2
    return var * np.identity(num_data)
+
+
+
def get_grad(self, num_data, params=None):
    """
    Gradient of the noise covariance w.r.t. params: 2 * exp(2*params) * I.

    Parameters
    ----------
    num_data: int
    params: numpy.ndarray
        Log-space parameter (default: self.params).

    Returns
    -------
    numpy.ndarray
        num_data x num_data diagonal matrix.
    """
    var = self.trans_params(params) ** 2
    return 2 * var * np.identity(num_data)
+
+
+
def set_params(self, params):
    """
    Store a clipped copy of params and the derived standard deviation.

    NOTE(review): ``std`` is derived from the *unclipped* argument while
    ``self.params`` is clipped -- this mirrors the original behavior;
    confirm whether the asymmetry is intended.

    Parameters
    ----------
    params: numpy.ndarray
        Log-space parameter.
    """
    self.params = self.supp_params(params)
    self.std = self.trans_params(params)
+
+
+
def get_cand_params(self, t):
    """
    Get a candidate parameter from the data.

    Parameters
    ----------
    t: numpy.ndarray
        N dimensional array of negative energies.

    Returns
    -------
    numpy.ndarray
        log[standard deviation of t] - log 10.0
    """
    # A tenth of the data's spread is a reasonable initial noise level.
    return np.log(np.std(t) / 10)
+
+
+ # [TODO] Check: This function seems not to be used.
+
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+
+
+[ドキュメント]
+classconst:
+"""constant"""
+
def __init__(self, params=None, max_params=1e12, min_params=-1e12):
    """
    Initialize the constant mean.

    Parameters
    ----------
    params: numpy.ndarray
        Parameters
    max_params: float
        Threshold value for specifying the maximum value of the parameter
    min_params: float
        Threshold value for specifying the minimum value of the parameter
    """
    self.max_params = max_params
    self.min_params = min_params
    self.init_params(params)
    # A constant mean has exactly one parameter.
    self.num_params = 1
+
+
def get_params_bound(self):
    """
    Get the boundary list for the parameters.

    Returns
    -------
    bound: list
        num_params-long list of (min_params, max_params) tuples.
    """
    return [(self.min_params, self.max_params)] * self.num_params
+
+
+
def get_mean(self, num_data, params=None):
    """
    Constant mean vector: params * ones(num_data).

    Parameters
    ----------
    num_data: int
        total number of data
    params: numpy.ndarray
        parameters (default: a copy of self.params)

    Returns
    -------
    numpy.ndarray
    """
    if params is None:
        params = np.copy(self.params)
    return params * np.ones(num_data)
+
+
+
def get_grad(self, num_data, params=None):
    """
    Gradient of the constant mean: a vector of ones.

    Parameters
    ----------
    num_data: int
        total number of data
    params: object
        not used

    Returns
    -------
    numpy.ndarray
    """
    return np.ones(num_data)
def get_cand_params(self, t):
    """
    Candidate parameter: the median of the targets.

    Parameters
    ----------
    t: array_like
        Input array or object that can be converted to an array

    Returns
    -------
    median: numpy.ndarray
        A new array holding the result.
    """
    return np.median(t)
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+
+
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importphysbo.predictor
+
+
+
+[ドキュメント]
+ deffit(self,training,num_basis=None):
+"""
+ Fitting model to training dataset
+
+ Parameters
+ ----------
+ training: physbo.variable
+ dataset for training
+ num_basis: int
+ the number of basis (default: self.config.predict.num_basis)
+ """
+ ifself.model.prior.cov.num_dimisNone:
+ self.model.prior.cov.num_dim=training.X.shape[1]
+ self.model.fit(training.X,training.t,self.config)
+ self.delete_stats()
+[ドキュメント]
+ defprepare(self,training):
+"""
+ Initializing model by using training data set
+
+ Parameters
+ ----------
+ training: physbo.variable
+ dataset for training
+
+ """
+ self.model.prepare(training.X,training.t)
+[ドキュメント]
+ defget_post_fmean(self,training,test):
+"""
+ Calculating posterior mean value of model
+
+ Parameters
+ ----------
+ training: physbo.variable
+ training dataset. If already trained, the model does not use this.
+ test: physbo.variable
+ inputs
+
+ Returns
+ -------
+ numpy.ndarray
+
+ """
+ ifself.model.statsisNone:
+ self.prepare(training)
+ returnself.model.get_post_fmean(training.X,test.X)
+
+
+
+[ドキュメント]
+ defget_post_fcov(self,training,test,diag=True):
+"""
+ Calculating posterior variance-covariance matrix of model
+
+ Parameters
+ ----------
+ training: physbo.variable
+ training dataset. If already trained, the model does not use this.
+ test: physbo.variable
+ inputs
+ diag: bool
+ Diagonlization flag in physbo.exact.get_post_fcov function.
+ Returns
+ -------
+ numpy.ndarray
+
+ """
+ ifself.model.statsisNone:
+ self.prepare(training)
+ returnself.model.get_post_fcov(training.X,test.X,diag=diag)
+
+
+
+[ドキュメント]
+ defget_post_samples(self,training,test,alpha=1):
+"""
+ Drawing samples of mean values of model
+
+ Parameters
+ ----------
+ training: physbo.variable
+ training dataset. If already trained, the model does not use this.
+ test: physbo.variable
+ inputs (not used)
+ alpha: float
+ tuning parameter of the covariance by multiplying alpha**2 for np.random.multivariate_normal.
+ Returns
+ -------
+ numpy.ndarray
+
+ """
+ ifself.model.statsisNone:
+ self.prepare(training)
+ returnself.model.post_sampling(training.X,test.X,alpha=alpha)
+
+
+
+[ドキュメント]
+ defget_predict_samples(self,training,test,N=1):
+"""
+ Drawing samples of values of model
+
+ Parameters
+ ----------
+ training: physbo.variable
+ training dataset. If already trained, the model does not use this.
+ test: physbo.variable
+ inputs
+ N: int
+ number of samples
+ (default: 1)
+
+ Returns
+ -------
+ numpy.ndarray (N x len(test))
+
+ """
+ ifself.model.statsisNone:
+ self.prepare(training)
+ returnself.model.predict_sampling(training.X,test.X,N=N)
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+
+
def centering(X):
    """
    Standardize each column of X to zero mean and unit standard deviation.

    Columns whose standard deviation is zero (constant features) are dropped.

    Parameters
    ----------
    X: numpy array
        N x d dimensional matrix. Each row of X denotes the d-dimensional
        feature vector of a search candidate.

    Returns
    -------
    X_normalized: numpy array
        normalized N x d' matrix (d' = number of non-constant columns).
    """
    col_std = np.std(X, 0)
    keep = np.where(col_std != 0)[0]
    col_mean = np.mean(X[:, keep], 0)
    return (X[:, keep] - col_mean) / col_std[keep]
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+importscipy
+
+
+
def gauss_elim(U, t):
    """
    Solve (U^T U) alpha = t with two triangular solves.

    alpha = (U^T U)^-1 t = U^-1 [(U^T)^-1 t]

    Parameters
    ----------
    U: (M, M) array_like
        An upper triangular matrix
    t: (M,) or (M, N) array_like
        Right-hand side

    Returns
    -------
    alpha: numpy.ndarray
        Solution of (U^T U) alpha = t. Shape of return matches t.
    """
    # forward substitution against the lower-triangular U^T
    y = scipy.linalg.solve_triangular(
        U.transpose(), t, lower=True, overwrite_b=False, check_finite=False
    )
    # back substitution against U itself
    return scipy.linalg.solve_triangular(
        U, y, lower=False, overwrite_b=False, check_finite=False
    )
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+importconfigparser
+
+
+
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+
+
class adam:
    """
    Optimizer of f(x) with the adam method

    Attributes
    ==========
    params: numpy.ndarray
        current input, x
    nparams: int
        dimension
    grad: function
        gradient function, g(x) = f'(x)
    m: numpy.ndarray
        internal state vector of the update rule (zero-initialized)
    v: numpy.ndarray
        internal state vector of the update rule (zero-initialized)
    epoch: int
        the number of update already done
    max_epoch: int
        the maximum number of update
    alpha: float
    beta: float
    gamma: float
    epsilon: float
    """

    def __init__(self, params, grad, options=None):
        """
        Parameters
        ==========
        params: numpy.ndarray
            initial input x
        grad: callable
            gradient function, g(x) = f'(x)
        options: dict, optional
            Hyperparameters for the adam method

            - "alpha" (default: 0.001)
            - "beta" (default: 0.9)
            - "gamma" (default: 0.9999)
            - "epsilon" (default: 1e-8)
            - "max_epoch" (default: 4000)
        """
        # Fix: the default used to be the mutable literal `options={}`,
        # which is shared across all calls; use a None sentinel instead.
        if options is None:
            options = {}
        self.grad = grad
        self.params = params
        self.nparams = params.shape[0]
        self._set_options(options)
        self.m = np.zeros(self.nparams)
        self.v = np.zeros(self.nparams)
        self.epoch = 0
+
+
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importpickleaspickle
+importnumpyasnp
+fromphysboimportgp
+
+
+
class base_predictor(object):
    """
    Base predictor is defined in this class.

    Concrete predictors override the NotImplementedError stubs below.
    """

    def __init__(self, config, model=None):
        """
        Parameters
        ----------
        config: set_config object (physbo.misc.set_config)
        model: model object
            A default model is set as gp.core.model
            (Gaussian covariance, constant mean, Gaussian likelihood).
        """
        self.config = config
        self.model = model
        if self.model is None:
            self.model = gp.core.model(
                cov=gp.cov.gauss(num_dim=None, ard=False),
                mean=gp.mean.const(),
                lik=gp.lik.gauss(),
            )

    def fit(self, *args, **kwds):
        """Default fit function. Must be overwritten in each model."""
        raise NotImplementedError

    def prepare(self, *args, **kwds):
        """Default prepare function. Must be overwritten in each model."""
        raise NotImplementedError

    def delete_stats(self, *args, **kwds):
        """Default function to delete status. Must be overwritten in each model."""
        raise NotImplementedError

    def get_basis(self, *args, **kwds):
        """Default function to get basis. Must be overwritten in each model."""
        raise NotImplementedError

    def get_post_fmean(self, *args, **kwds):
        """Default function to get a mean value of the score.
        Must be overwritten in each model."""
        raise NotImplementedError

    def get_post_fcov(self, *args, **kwds):
        """Default function to get a covariance of the score.
        Must be overwritten in each model."""
        raise NotImplementedError

    def get_post_params(self, *args, **kwds):
        """Default function to get parameters. Must be overwritten in each model."""
        raise NotImplementedError

    def get_post_samples(self, *args, **kwds):
        """Default function to get samples. Must be overwritten in each model."""
        raise NotImplementedError

    def get_predict_samples(self, *args, **kwds):
        """Default function to get prediction variables of samples.
        Must be overwritten in each model."""
        raise NotImplementedError

    def get_post_params_samples(self, *args, **kwds):
        """Default function to get parameters of samples.
        Must be overwritten in each model."""
        raise NotImplementedError

    def update(self, *args, **kwds):
        """Default function to update variables. Must be overwritten in each model."""
        raise NotImplementedError

    def save(self, file_name):
        """
        Save self.__dict__ with pickle.dump.

        The pickle protocol version is 4.
        (The previous docstring claimed protocol 3, which did not match the code.)

        Parameters
        ----------
        file_name: str
            A file name to save self.__dict__ object.
        """
        with open(file_name, "wb") as f:
            pickle.dump(self.__dict__, f, 4)

    def load(self, file_name):
        """
        Load config and model from a file written by save().

        Parameters
        ----------
        file_name: str
            A file name to load variables from the file.
        """
        with open(file_name, "rb") as f:
            tmp_dict = pickle.load(f)
        self.config = tmp_dict["config"]
        self.model = tmp_dict["model"]
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+importcopy
+importpickleaspickle
+importitertools
+importtime
+
+from.resultsimporthistory
+from..importutility
+from..importscoreassearch_score
+from...gpimportpredictorasgp_predictor
+from...blmimportpredictorasblm_predictor
+from...miscimportset_config
+
+fromphysbo.variableimportvariable
+
+
+
class policy:
    """Sequential (random / Bayesian) search policy over a discrete candidate set."""

    def __init__(self, test_X, config=None, initial_data=None, comm=None):
        """
        Parameters
        ----------
        test_X: numpy.ndarray or physbo.variable
            The set of candidates. Each row vector represents the feature
            vector of each search candidate.
        config: set_config object (physbo.misc.set_config)
            If None, a default set_config() is used.
        initial_data: tuple[np.ndarray, np.ndarray]
            The initial training datasets.
            The first element is the array of actions and the second is the
            array of values of objective functions.
        comm: MPI.Comm, optional
            MPI Communicator; when given, the candidates are split over ranks.
        """
        self.predictor = None
        self.training = variable()
        self.new_data = None
        self.test = self._make_variable_X(test_X)
        self.actions = np.arange(0, self.test.X.shape[0])
        self.history = history()
        self.config = set_config() if config is None else config

        if initial_data is not None:
            if len(initial_data) != 2:
                msg = "ERROR: initial_data should be 2-elements tuple or list (actions and objectives)"
                raise RuntimeError(msg)
            actions, fs = initial_data
            if len(actions) != len(fs):
                msg = "ERROR: len(initial_data[0]) != len(initial_data[1])"
                raise RuntimeError(msg)
            self.write(actions, fs)
            # keep only the candidates that were not part of the initial data
            self.actions = np.array(sorted(list(set(self.actions) - set(actions))))

        if comm is None:
            self.mpicomm = None
            self.mpisize = 1
            self.mpirank = 0
        else:
            self.mpicomm = comm
            self.mpisize = comm.size
            self.mpirank = comm.rank
            self.actions = np.array_split(self.actions, self.mpisize)[self.mpirank]
            # only rank 0 is allowed to print learning progress
            self.config.learning.is_disp = (
                self.config.learning.is_disp and self.mpirank == 0
            )

    def set_seed(self, seed):
        """
        Set a seed parameter for np.random.

        Parameters
        ----------
        seed: int
            seed number
        """
        self.seed = seed
        np.random.seed(self.seed)

    def write(
        self,
        action,
        t,
        X=None,
        time_total=None,
        time_update_predictor=None,
        time_get_action=None,
        time_run_simulator=None,
    ):
        """
        Update the in-memory history (no file output).

        Parameters
        ----------
        action: numpy.ndarray
            Indexes of actions.
        t: numpy.ndarray
            N dimensional array. The value of the objective function of each
            searched candidate.
        X: numpy.ndarray
            N x d dimensional matrix. Each row denotes the d-dimensional
            feature vector of each candidate. If None, taken from self.test.
        time_total: numpy.ndarray
            N dimensional array of total elapsed times per step; 0.0 when None.
        time_update_predictor: numpy.ndarray
            N dimensional array of predictor-update times per step; 0.0 when None.
        time_get_action: numpy.ndarray
            N dimensional array of action-selection times per step; 0.0 when None.
        time_run_simulator: numpy.ndarray
            N dimensional array of simulator times per step; 0.0 when None.
        """
        if X is None:
            X = self.test.X[action, :]
            Z = self.test.Z[action, :] if self.test.Z is not None else None
        else:
            Z = self.predictor.get_basis(X) if self.predictor is not None else None

        self.history.write(
            t,
            action,
            time_total=time_total,
            time_update_predictor=time_update_predictor,
            time_get_action=time_get_action,
            time_run_simulator=time_run_simulator,
        )
        self.training.add(X=X, t=t, Z=Z)

        # remove the selected actions from the list of candidates if present
        if len(self.actions) > 0:
            local_index = np.searchsorted(self.actions, action)
            local_index = local_index[
                np.take(self.actions, local_index, mode="clip") == action
            ]
            self.actions = self._delete_actions(local_index)

        if self.new_data is None:
            self.new_data = variable(X=X, t=t, Z=Z)
        else:
            self.new_data.add(X=X, t=t, Z=Z)

    def random_search(
        self, max_num_probes, num_search_each_probe=1, simulator=None, is_disp=True
    ):
        """
        Perform random search.

        Parameters
        ----------
        max_num_probes: int
            Maximum number of random search steps.
        num_search_each_probe: int
            Number of candidates searched at each step.
        simulator: callable
            Callable (function or object with ``__call__``) from action to t.
            An action is an integer index of a candidate.
            If None, the chosen actions are returned without evaluation.
        is_disp: bool
            If true, process messages are printed.

        Returns
        -------
        history: history object (physbo.search.discrete.results.history)
        """
        if self.mpirank != 0:
            is_disp = False

        N = int(num_search_each_probe)

        if is_disp:
            utility.show_interactive_mode(simulator, self.history)

        for n in range(0, max_num_probes):
            time_total = time.time()
            if is_disp and N > 1:
                utility.show_start_message_multi_search(self.history.num_runs)

            time_get_action = time.time()
            action = self._get_random_action(N)
            time_get_action = time.time() - time_get_action

            N_indeed = len(action)
            if N_indeed == 0:
                if self.mpirank == 0:
                    print("WARNING: All actions have already searched.")
                return copy.deepcopy(self.history)

            if simulator is None:
                return action

            time_run_simulator = time.time()
            t = _run_simulator(simulator, action, self.mpicomm)
            time_run_simulator = time.time() - time_run_simulator

            time_total = time.time() - time_total
            self.write(
                action,
                t,
                time_total=[time_total] * N_indeed,
                time_update_predictor=np.zeros(N_indeed, dtype=float),
                time_get_action=[time_get_action] * N_indeed,
                time_run_simulator=[time_run_simulator] * N_indeed,
            )

            if is_disp:
                utility.show_search_results(self.history, N_indeed)

        return copy.deepcopy(self.history)

    def bayes_search(
        self,
        training=None,
        max_num_probes=None,
        num_search_each_probe=1,
        predictor=None,
        is_disp=True,
        simulator=None,
        score="TS",
        interval=0,
        num_rand_basis=0,
    ):
        """
        Perform Bayesian optimization.

        Parameters
        ----------
        training: physbo.variable
            Training dataset.
        max_num_probes: int
            Maximum number of Bayesian optimization steps.
        num_search_each_probe: int
            Number of candidates searched at each step.
        predictor: predictor object
            Base class is defined in physbo.predictor.
            If None, a predictor is created on first use.
        is_disp: bool
            If true, process messages are printed.
        simulator: callable
            Callable (function or object with ``__call__``) from action to t.
            An action is an integer index of a candidate.
        score: str
            The type of acquisition function.
            TS (Thompson Sampling), EI (Expected Improvement) and
            PI (Probability of Improvement) are available.
        interval: int
            Interval (in steps) of hyperparameter learning.
            Negative: never learn; zero: learn only at the first step.
        num_rand_basis: int
            Number of basis functions; 0 means an ordinary Gaussian process.

        Returns
        -------
        history: history object (physbo.search.discrete.results.history)
        """
        if self.mpirank != 0:
            is_disp = False

        old_disp = self.config.learning.is_disp
        self.config.learning.is_disp = is_disp

        if max_num_probes is None:
            max_num_probes = 1
            simulator = None

        is_rand_expans = num_rand_basis != 0

        if training is not None:
            self.training = training

        if predictor is not None:
            self.predictor = predictor
        elif self.predictor is None:
            self._init_predictor(is_rand_expans)

        if max_num_probes == 0 and interval >= 0:
            self._learn_hyperparameter(num_rand_basis)

        N = int(num_search_each_probe)

        for n in range(max_num_probes):
            time_total = time.time()

            time_update_predictor = time.time()
            if utility.is_learning(n, interval):
                self._learn_hyperparameter(num_rand_basis)
            else:
                self._update_predictor()
            time_update_predictor = time.time() - time_update_predictor

            if num_search_each_probe != 1:
                utility.show_start_message_multi_search(self.history.num_runs, score)

            time_get_action = time.time()
            K = self.config.search.multi_probe_num_sampling
            alpha = self.config.search.alpha
            action = self._get_actions(score, N, K, alpha)
            time_get_action = time.time() - time_get_action

            N_indeed = len(action)
            if N_indeed == 0:
                if self.mpirank == 0:
                    print("WARNING: All actions have already searched.")
                break

            if simulator is None:
                self.config.learning.is_disp = old_disp
                return action

            time_run_simulator = time.time()
            t = _run_simulator(simulator, action, self.mpicomm)
            time_run_simulator = time.time() - time_run_simulator

            time_total = time.time() - time_total
            self.write(
                action,
                t,
                time_total=[time_total] * N_indeed,
                time_update_predictor=[time_update_predictor] * N_indeed,
                time_get_action=[time_get_action] * N_indeed,
                time_run_simulator=[time_run_simulator] * N_indeed,
            )

            if is_disp:
                utility.show_search_results(self.history, N_indeed)
        self._update_predictor()
        self.config.learning.is_disp = old_disp
        return copy.deepcopy(self.history)

    @staticmethod
    def _warn_no_predictor(method_name):
        # reviewer note: printed (not logged) to match the rest of this module
        print("Warning: Since policy.predictor is not yet set,")
        print("         a GP predictor (num_rand_basis=0) is used for predicting")
        print("         If you want to use a BLM predictor (num_rand_basis>0),")
        print("         call bayes_search(max_num_probes=0, num_rand_basis=nrb)")
        print("         before calling {}.".format(method_name))

    def get_post_fmean(self, xs):
        """Calculate mean value of predictor (post distribution)"""
        X = self._make_variable_X(xs)
        if self.predictor is None:
            self._warn_no_predictor("get_post_fmean()")
            predictor = gp_predictor(self.config)
            predictor.fit(self.training, 0)
            predictor.prepare(self.training)
            return predictor.get_post_fmean(self.training, X)
        else:
            self._update_predictor()
            return self.predictor.get_post_fmean(self.training, X)

    def get_score(
        self,
        mode,
        *,
        actions=None,
        xs=None,
        predictor=None,
        training=None,
        parallel=True,
        alpha=1
    ):
        """
        Calculate the score (acquisition function).

        Parameters
        ----------
        mode: str
            The type of acquisition function. TS, EI and PI are available.
            These functions are defined in score.py.
        actions: array of int
            actions to calculate score
        xs: physbo.variable or np.ndarray
            input parameters to calculate score
        predictor: predictor object
            predictor used to calculate score.
            If not given, self.predictor will be used.
        training: physbo.variable
            Training dataset. If not given, self.training will be used.
        parallel: bool
            Calculate scores in parallel by MPI (default: True)
        alpha: float
            Tuning parameter which is used if mode = TS.
            In TS, multi variation is tuned as
            np.random.multivariate_normal(mean, cov*alpha**2, size).

        Returns
        -------
        f: float or list of float
            Score defined in each mode.

        Raises
        ------
        RuntimeError
            If both *actions* and *xs* are given, or no training data exists.

        Notes
        -----
        When neither *actions* nor *xs* are given, scores for actions not yet
        searched will be calculated.

        When *parallel* is True, it is assumed that the function receives the
        same input (*actions* or *xs*) for all the ranks.
        If you want to split the input array itself, set *parallel* to False
        and merge results by yourself.
        """
        if training is None:
            training = self.training

        if training.X is None or training.X.shape[0] == 0:
            msg = "ERROR: No training data is registered."
            raise RuntimeError(msg)

        if predictor is None:
            if self.predictor is None:
                self._warn_no_predictor("get_score()")
                predictor = gp_predictor(self.config)
                predictor.fit(training, 0)
                predictor.prepare(training)
            else:
                self._update_predictor()
                predictor = self.predictor

        if xs is not None:
            if actions is not None:
                raise RuntimeError("ERROR: both actions and xs are given")
            test = self._make_variable_X(xs)
            if parallel and self.mpisize > 1:
                # each rank scores an equal share of the inputs
                actions = np.array_split(np.arange(test.X.shape[0]), self.mpisize)
                test = test.get_subset(actions[self.mpirank])
        else:
            if actions is None:
                actions = self.actions
            else:
                if isinstance(actions, int):
                    actions = [actions]
                if parallel and self.mpisize > 1:
                    actions = np.array_split(actions, self.mpisize)[self.mpirank]
            test = self.test.get_subset(actions)

        f = search_score.score(
            mode, predictor=predictor, training=training, test=test, alpha=alpha
        )
        if parallel and self.mpisize > 1:
            fs = self.mpicomm.allgather(f)
            f = np.hstack(fs)
        return f

    def _get_marginal_score(self, mode, chosen_actions, K, alpha):
        """
        Get marginal scores, averaged over K virtual objective samples.

        Parameters
        ----------
        mode: str
            The type of acquisition function (TS, EI, PI), defined in score.py.
        chosen_actions: numpy.ndarray
            Array of already-selected actions.
        K: int
            The number of samples for evaluating the score.
        alpha: float
            not used.

        Returns
        -------
        numpy.ndarray
            Mean score per remaining action.
        """
        f = np.zeros((K, len(self.actions)), dtype=float)

        # draw K samples of the values of objective function of chosen actions
        new_test_local = self.test.get_subset(chosen_actions)
        virtual_t_local = self.predictor.get_predict_samples(
            self.training, new_test_local, K
        )
        if self.mpisize == 1:
            new_test = new_test_local
            virtual_t = virtual_t_local
        else:
            new_test = variable()
            for nt in self.mpicomm.allgather(new_test_local):
                new_test.add(X=nt.X, t=nt.t, Z=nt.Z)
            virtual_t = np.concatenate(self.mpicomm.allgather(virtual_t_local), axis=1)

        for k in range(K):
            predictor = copy.deepcopy(self.predictor)
            train = copy.deepcopy(self.training)
            virtual_train = new_test
            virtual_train.t = virtual_t[k, :]

            if virtual_train.Z is None:
                train.add(virtual_train.X, virtual_train.t)
            else:
                train.add(virtual_train.X, virtual_train.t, virtual_train.Z)

            predictor.update(train, virtual_train)

            f[k, :] = self.get_score(
                mode, predictor=predictor, training=train, parallel=False
            )
        return np.mean(f, axis=0)

    def _get_actions(self, mode, N, K, alpha):
        """
        Select up to N next actions by maximizing the acquisition score.

        Parameters
        ----------
        mode: str
            The type of acquisition function (TS, EI, PI), defined in score.py.
        N: int
            The total number of actions to return.
        K: int
            The total number of samples to evaluate the marginal score.
        alpha: float
            Tuning parameter which is used if mode = TS.

        Returns
        -------
        chosen_actions: numpy.ndarray
            Actions selected in this search step (may be shorter than N).
        """
        f = self.get_score(
            mode,
            predictor=self.predictor,
            training=self.training,
            alpha=alpha,
            parallel=False,
        )
        champion, local_champion, local_index = self._find_champion(f)
        if champion == -1:
            return np.zeros(0, dtype=int)
        if champion == local_champion:
            self.actions = self._delete_actions(local_index)

        chosen_actions = [champion]
        for n in range(1, N):
            f = self._get_marginal_score(mode, chosen_actions[0:n], K, alpha)
            champion, local_champion, local_index = self._find_champion(f)
            if champion == -1:
                break
            if champion == local_champion:
                self.actions = self._delete_actions(local_index)
            chosen_actions.append(champion)
        return np.array(chosen_actions)

    def _find_champion(self, f):
        # returns (global best action, this rank's best action, its local index);
        # -1 signals "no candidate left on this rank"
        if len(f) == 0:
            local_fmax = -float("inf")
            local_index = -1
            local_champion = -1
        else:
            local_fmax = np.max(f)
            local_index = np.argmax(f)
            local_champion = self.actions[local_index]
        if self.mpisize == 1:
            champion = local_champion
        else:
            local_champions = self.mpicomm.allgather(local_champion)
            local_fs = self.mpicomm.allgather(local_fmax)
            champion_rank = np.argmax(local_fs)
            champion = local_champions[champion_rank]
        return champion, local_champion, local_index

    def _get_random_action(self, N):
        """
        Pick up to N actions uniformly at random from the remaining candidates.

        Parameters
        ----------
        N: int
            Number of candidates to draw.

        Returns
        -------
        action: numpy.ndarray
            Randomly selected action indexes (gathered over all ranks).
        """
        if self.mpisize == 1:
            n = len(self.actions)
            if n <= N:
                index = np.arange(0, n)
            else:
                index = np.random.choice(len(self.actions), N, replace=False)
            action = self.actions[index]
            self.actions = self._delete_actions(index)
        else:
            # rank 0 draws a global sample and scatters per-rank local indexes
            nactions = self.mpicomm.gather(len(self.actions), root=0)
            local_indices = [[] for _ in range(self.mpisize)]
            if self.mpirank == 0:
                hi = np.add.accumulate(nactions)
                lo = np.roll(hi, 1)
                lo[0] = 0
                if hi[-1] <= N:
                    index = np.arange(0, hi[-1])
                else:
                    index = np.random.choice(hi[-1], N, replace=False)
                ranks = np.searchsorted(hi, index, side="right")
                for r, i in zip(ranks, index):
                    local_indices[r].append(i - lo[r])
            local_indices = self.mpicomm.scatter(local_indices, root=0)
            local_actions = self.actions[local_indices]
            self.actions = self._delete_actions(local_indices)
            action = self.mpicomm.allgather(local_actions)
            action = np.array(list(itertools.chain.from_iterable(action)))
        return action

    def save(self, file_history, file_training=None, file_predictor=None):
        """
        Save history, training dataset, and predictor to files (rank 0 only).

        Parameters
        ----------
        file_history: str
            The name of the file that stores the information of the history.
        file_training: str
            The name of the file that stores the training dataset.
        file_predictor: str
            The name of the file that stores the predictor dataset.
        """
        if self.mpirank == 0:
            self.history.save(file_history)

            if file_training is not None:
                self.training.save(file_training)

            if file_predictor is not None:
                with open(file_predictor, "wb") as f:
                    pickle.dump(self.predictor, f)

    def load(self, file_history, file_training=None, file_predictor=None):
        """
        Load history, training dataset, and predictor from files.

        When file_training is None, the training dataset is reconstructed
        from the loaded history and the candidate set.

        Parameters
        ----------
        file_history: str
            The name of the file that stores the information of the history.
        file_training: str
            The name of the file that stores the training dataset.
        file_predictor: str
            The name of the file that stores the predictor dataset.
        """
        self.history.load(file_history)

        if file_training is None:
            N = self.history.total_num_search
            X = self.test.X[self.history.chosen_actions[0:N], :]
            t = self.history.fx[0:N]
            self.training = variable(X=X, t=t)
        else:
            self.training = variable()
            self.training.load(file_training)

        if file_predictor is not None:
            with open(file_predictor, "rb") as f:
                self.predictor = pickle.load(f)

        N = self.history.total_num_search

        # drop the already-visited actions from the remaining candidates
        visited = self.history.chosen_actions[:N]
        local_index = np.searchsorted(self.actions, visited)
        local_index = local_index[
            np.take(self.actions, local_index, mode="clip") == visited
        ]
        self.actions = self._delete_actions(local_index)

    def export_training(self):
        """Return the training dataset."""
        return self.training

    def export_history(self):
        """Return the history object."""
        return self.history

    def _init_predictor(self, is_rand_expans):
        """
        Initialize the predictor.

        Parameters
        ----------
        is_rand_expans: bool
            If true, physbo.blm.predictor is selected.
            If false, physbo.gp.predictor is selected.
        """
        if is_rand_expans:
            self.predictor = blm_predictor(self.config)
        else:
            self.predictor = gp_predictor(self.config)

    def _learn_hyperparameter(self, num_rand_basis):
        # refit the predictor and refresh the basis of both candidate and
        # training sets; pending new data is consumed by the refit
        self.predictor.fit(self.training, num_rand_basis)
        self.test.Z = self.predictor.get_basis(self.test.X)
        self.training.Z = self.predictor.get_basis(self.training.X)
        self.predictor.prepare(self.training)
        self.new_data = None

    def _update_predictor(self):
        if self.new_data is not None:
            self.predictor.update(self.training, self.new_data)
            self.new_data = None

    def _make_variable_X(self, test_X):
        """
        Wrap test_X into a *variable* (no copy if it already is one).

        Parameters
        ----------
        test_X: numpy.ndarray or physbo.variable
            The set of candidates. Each row vector represents the feature
            vector of each search candidate.

        Returns
        -------
        test: physbo.variable
            A variable whose X is test_X.
        """
        if isinstance(test_X, np.ndarray):
            return variable(X=test_X)
        elif isinstance(test_X, variable):
            return test_X
        else:
            raise TypeError("The type of test_X must be ndarray or physbo.variable")

    def _delete_actions(self, index, actions=None):
        """
        Return the remaining actions.

        Notes
        -----
        This method itself does not modify *self*.

        Parameters
        ----------
        index: int
            Index of an action to be deleted.
        actions: numpy.ndarray
            Array of actions; defaults to self.actions.

        Returns
        -------
        actions: numpy.ndarray
            Array of actions without the entry specified by index.
        """
        if actions is None:
            actions = self.actions
        return np.delete(actions, index)
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+importcopy
+importpickle
+
+from..importutility
+
+MAX_SEARCH=int(30000)
+
+
+
def write(
    self,
    t,
    action,
    time_total=None,
    time_update_predictor=None,
    time_get_action=None,
    time_run_simulator=None,
):
    """Append a batch of results (fx, chosen actions, timings) to the history.

    Parameters
    ----------
    t: numpy.ndarray
        N dimensional array. The negative energy of each search candidate
        (value of the objective function to be optimized).
    action: numpy.ndarray
        N dimensional array. The indexes of actions of each search candidate.
    time_total: numpy.ndarray
        N dimensional array. The total elapsed time in each step.
        If None (default), filled by 0.0.
    time_update_predictor: numpy.ndarray
        N dimensional array. The elapsed time for updating predictor
        (e.g., learning hyperparameters) in each step.
        If None (default), filled by 0.0.
    time_get_action: numpy.ndarray
        N dimensional array. The elapsed time for getting next action in
        each step. If None (default), filled by 0.0.
    time_run_simulator: numpy.ndarray
        N dimensional array. The elapsed time for running the simulator
        in each step. If None (default), filled by 0.0.
    """
    N = utility.length_vector(t)
    st = self.total_num_search
    en = st + N

    # Record this run's results into the preallocated arrays.
    self.terminal_num_run[self.num_runs] = en
    self.fx[st:en] = t
    self.chosen_actions[st:en] = action
    self.num_runs += 1
    self.total_num_search += N

    # Missing timing arrays default to zeros of the batch length.
    timings = (
        ("time_total_", time_total),
        ("time_update_predictor_", time_update_predictor),
        ("time_get_action_", time_get_action),
        ("time_run_simulator_", time_run_simulator),
    )
    for attr, values in timings:
        if values is None:
            values = np.zeros(N, dtype=float)
        getattr(self, attr)[st:en] = values
+
+
+
def export_sequence_best_fx(self):
    """Export the best fx and its action observed up to each run.

    (The total number of data is num_runs.)

    Returns
    -------
    best_fx: numpy.ndarray
    best_actions: numpy.ndarray
    """
    best_fx = np.zeros(self.num_runs, dtype=float)
    best_actions = np.zeros(self.num_runs, dtype=int)
    # For each run, take the argmax over everything observed so far.
    for run, end in enumerate(self.terminal_num_run[: self.num_runs]):
        idx = np.argmax(self.fx[:end])
        best_fx[run] = self.fx[idx]
        best_actions[run] = self.chosen_actions[idx]
    return best_fx, best_actions
+
+
+
def export_all_sequence_best_fx(self):
    """Export the running best fx and action at every search step.

    (The total number of data is total_num_search.)

    Returns
    -------
    best_fx: numpy.ndarray
    best_actions: numpy.ndarray
    """
    N = self.total_num_search
    best_fx = np.zeros(N, dtype=float)
    best_actions = np.zeros(N, dtype=int)

    # Running maximum: carry the previous best forward unless beaten.
    best_fx[0] = self.fx[0]
    best_actions[0] = self.chosen_actions[0]
    for n in range(1, N):
        if self.fx[n] > best_fx[n - 1]:
            best_fx[n] = self.fx[n]
            best_actions[n] = self.chosen_actions[n]
        else:
            best_fx[n] = best_fx[n - 1]
            best_actions[n] = best_actions[n - 1]

    return best_fx, best_actions
+
+
+
def save(self, filename):
    """Save the information of the history to a compressed npz file.

    Parameters
    ----------
    filename: str
        The name of the file which stores the information of the history.
    """
    n_search = self.total_num_search
    n_runs = self.num_runs
    # Only the filled prefixes of the preallocated arrays are persisted.
    np.savez_compressed(
        filename,
        num_runs=n_runs,
        total_num_search=n_search,
        fx=self.fx[:n_search],
        chosen_actions=self.chosen_actions[:n_search],
        terminal_num_run=self.terminal_num_run[:n_runs],
    )
+
+
+
def load(self, filename):
    """Load the information of the history from a npz file.

    Parameters
    ----------
    filename: str
        The name of the file which stores the information of the history.
    """
    data = np.load(filename)
    n_runs = int(data["num_runs"])
    n_search = int(data["total_num_search"])
    self.num_runs = n_runs
    self.total_num_search = n_search
    # Fill only the prefixes; the rest of the preallocated arrays is untouched.
    self.fx[:n_search] = data["fx"]
    self.chosen_actions[:n_search] = data["chosen_actions"]
    self.terminal_num_run[:n_runs] = data["terminal_num_run"]
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+importcopy
+importpickleaspickle
+importtime
+
+from.resultsimporthistory
+from..importdiscrete
+from..importutility
+from..importscore_multiassearch_score
+from...gpimportpredictorasgp_predictor
+from...blmimportpredictorasblm_predictor
+from...miscimportset_config
+from...variableimportvariable
+
+fromtypingimportList,Optional
+
+
+
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+importpickle
+importcopy
+
+from..importpareto
+
+MAX_SEARCH=int(30000)
+
+
+
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+
+
class Rectangles(object):
    """A growable set of axis-aligned hyper-rectangles.

    Rectangles are stored as two (num_rect, n_dim) arrays of lower and
    upper bounds.
    """

    def __init__(self, n_dim, dtype):
        """Initialize an empty set of hyper-rectangles.

        :param n_dim: dimension of rectangles
        :param dtype: dtype of the bound arrays
        """
        self.n_dim = n_dim
        self.lb = np.zeros((0, n_dim), dtype=dtype)
        self.ub = np.zeros((0, n_dim), dtype=dtype)

    def add(self, lb, ub):
        """Append new rectangles.

        :param lb: lower bounds of rectangles
        :param ub: upper bounds of rectangles
        """
        self.lb = np.r_[self.lb, lb]
        self.ub = np.r_[self.ub, ub]
+
+
+
+
+
def dominate(t1, t2):
    """Domination rule for a maximization problem.

    *t1* dominates *t2* iff *t1* is no worse in every objective and
    strictly better in at least one.
    """
    no_worse = np.all(t1 >= t2)
    strictly_better = np.any(t1 > t2)
    return no_worse and strictly_better
+
+
def __divide_2d(self):
    """Divide the non-dominated region into vertical cells (2-objective case).

    Assumes the Pareto set has been sorted ascending on the first
    objective (and therefore descending on the second).
    """
    n_cells = self.front.shape[0] + 1
    lb_idx = []
    ub_idx = []
    for i in range(n_cells):
        # Lower corner pairs front point i with its successor (wrapping);
        # upper corner is unbounded in the second objective.
        lb_idx.append([i, (i + 1) % n_cells])
        ub_idx.append([i + 1, n_cells])
    self.cells.add(lb_idx, ub_idx)
+
def __included_in_non_dom_region(self, p):
    """Return whether point *p* lies in the non-dominated region,
    i.e. no Pareto-front point dominates it in every objective."""
    return np.all([np.any(pareto_pt <= p) for pareto_pt in self.front])
+
def __divide_using_binary_search(self):
    """Divide the non-dominated region into hyper-rectangles by recursive
    bisection of index-space rectangles (general number of objectives).

    Candidate rectangles are kept on a stack; each is either accepted as a
    cell (entirely non-dominated), split along its largest dimension, or
    discarded (entirely dominated).
    """
    # Extend the front with anti-ideal (-inf) and ideal (+inf) corner rows.
    front = np.r_[
        np.full((1, self.num_objectives), -np.inf),
        self.front,
        np.full((1, self.num_objectives), np.inf),
    ]

    # Pareto front indices when sorted on each dimension's front value in ascending order.
    # (indices start from 1)
    # Index 0 means anti-ideal value, index `self.front.shape[0] + 1` means ideal point.
    front_idx = np.r_[
        np.zeros((1, self.num_objectives), dtype=int),
        np.argsort(self.front, axis=0) + 1,
        np.full((1, self.num_objectives), self.front.shape[0] + 1, dtype=int),
    ]

    # Start from the whole index-space box (anti-ideal .. ideal).
    rect_candidates = [[np.copy(front_idx[0]), np.copy(front_idx[-1])]]

    while rect_candidates:
        rect = rect_candidates.pop()

        # Map per-dimension sorted positions back to front-row indices,
        # then to actual objective values.
        lb_idx = [front_idx[rect[0][d], d] for d in range(self.num_objectives)]
        ub_idx = [front_idx[rect[1][d], d] for d in range(self.num_objectives)]
        lb = [front[lb_idx[d], d] for d in range(self.num_objectives)]
        ub = [front[ub_idx[d], d] for d in range(self.num_objectives)]

        if self.__included_in_non_dom_region(lb):
            # Lower corner already non-dominated => whole rectangle is; keep it.
            self.cells.add([lb_idx], [ub_idx])

        elif self.__included_in_non_dom_region(ub):
            # Rectangle straddles the front: bisect it if it can still shrink.
            rect_sizes = rect[1] - rect[0]

            # divide rectangle by the dimension with largest size
            if np.any(rect_sizes > 1):
                div_dim = np.argmax(rect_sizes)
                div_point = rect[0][div_dim] + int(round(rect_sizes[div_dim] / 2.0))

                # add divided left rectangle
                left_ub_idx = np.copy(rect[1])
                left_ub_idx[div_dim] = div_point
                rect_candidates.append([np.copy(rect[0]), left_ub_idx])

                # add divided right rectangle
                right_lb_idx = np.copy(rect[0])
                right_lb_idx[div_dim] = div_point
                rect_candidates.append([right_lb_idx, np.copy(rect[1])])
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+importscipy.stats
+
+
+
def score(mode, predictor, test, training=None, **kwargs):
    """Calculate scores (acquisition function) for test data.

    Parameters
    ----------
    mode: str
        Kind of score. "EI", "PI", and "TS" are available.
    predictor: predictor object
        Base class is defined in physbo.predictor.
    test: physbo.variable
        Inputs.
    training: physbo.variable
        Training dataset. If the predictor is not trained, use this for
        training.

    Other Parameters
    ----------------
    fmax: float
        Max value of mean of posterior probability distribution.
        If not set, the maximum value of posterior mean for training is
        used. Used only for mode == "EI" and "PI".
    alpha: float
        Noise for sampling source (default: 1.0).
        Used only for mode == "TS".

    Returns
    -------
    score: numpy.ndarray

    Raises
    ------
    NotImplementedError
        If unknown mode is given.
    """
    # No candidates left: nothing to score.
    if test.X.shape[0] == 0:
        return np.zeros(0)

    if mode == "EI":
        return EI(predictor, training, test, kwargs.get("fmax", None))
    if mode == "PI":
        return PI(predictor, training, test, kwargs.get("fmax", None))
    if mode == "TS":
        return TS(predictor, training, test, kwargs.get("alpha", 1.0))
    raise NotImplementedError("ERROR: mode must be EI, PI or TS.")
+
+
+
+
def EI(predictor, training, test, fmax=None):
    """Maximum expected improvement.

    Parameters
    ----------
    predictor: predictor object
        Base class is defined in physbo.predictor.
    training: physbo.variable
        Training dataset. If the predictor is not trained, use this for
        training.
    test: physbo.variable
        Inputs.
    fmax: float
        Max value of posterior probability distribution.
        If not set, the maximum value of posterior mean for training is used.

    Returns
    -------
    score: numpy.ndarray
    """
    mean = predictor.get_post_fmean(training, test)
    std = np.sqrt(predictor.get_post_fcov(training, test))

    if fmax is None:
        # Default reference value: best posterior mean over training inputs.
        fmax = np.max(predictor.get_post_fmean(training, training))

    gain = mean - fmax
    z = gain / std
    # Standard closed-form EI for a Gaussian posterior.
    return gain * scipy.stats.norm.cdf(z) + std * scipy.stats.norm.pdf(z)
+
+
+
+
def PI(predictor, training, test, fmax=None):
    """Maximum probability of improvement.

    Parameters
    ----------
    predictor: predictor object
        Base class is defined in physbo.predictor.
    training: physbo.variable
        Training dataset. If the predictor is not trained, use this for
        training.
    test: physbo.variable
        Inputs.
    fmax: float
        Max value of posterior probability distribution.
        If not set, the maximum value of posterior mean for training is used.

    Returns
    -------
    score: numpy.ndarray
    """
    mean = predictor.get_post_fmean(training, test)
    std = np.sqrt(predictor.get_post_fcov(training, test))

    if fmax is None:
        # Default reference value: best posterior mean over training inputs.
        fmax = np.max(predictor.get_post_fmean(training, training))

    # P(f > fmax) under the Gaussian posterior.
    return scipy.stats.norm.cdf((mean - fmax) / std)
+
+
+
+
def TS(predictor, training, test, alpha=1):
    """Thompson sampling.

    (See Sec. 2.1 in Materials Discovery Volume 4, June 2016, Pages 18-21.)

    Parameters
    ----------
    predictor: predictor object
        Base class is defined in physbo.predictor.
    training: physbo.variable
        Training dataset. If the predictor is not trained, use this for
        training.
    test: physbo.variable
        Inputs.
    alpha: float
        Noise for sampling source (default: 1.0).

    Returns
    -------
    score: numpy.ndarray
    """
    samples = predictor.get_post_samples(training, test, alpha=alpha)
    return samples.flatten()
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+importscipy.stats
+
+from.paretoimportPareto
+
+
+
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+
+
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+importnumpyasnp
+
+
+
class variable(object):
    """Container for a dataset: inputs X, targets t, and basis features Z."""

    def __init__(self, X=None, t=None, Z=None):
        """Initialize the container.

        Parameters
        ----------
        X: numpy array
            N x d dimensional matrix. Each row of X denotes the
            d-dimensional feature vector of each search candidate.
        t: numpy array
            N dimensional array. The negative energy of each search
            candidate (value of the objective function to be optimized).
        Z:
            Feature-mapped representation of X (predictor basis output).
        """
        self.X = X
        self.t = t
        self.Z = Z
+
+
def get_subset(self, index):
    """Return a new variable holding only the rows selected by *index*.

    Parameters
    ----------
    index: int or array of int
        Index of selected action.

    Returns
    -------
    variable: physbo.variable
        A fresh variable; missing components (None) stay None.
    """
    def pick_rows(arr):
        # Row-wise selection for 2-D components; propagate None.
        return arr[index, :] if arr is not None else None

    sub_t = self.t[index] if self.t is not None else None
    return variable(X=pick_rows(self.X), t=sub_t, Z=pick_rows(self.Z))
+
+
+
def delete(self, num_row):
    """Delete the rows specified by *num_row* from X, t, and Z in place.

    Parameters
    ----------
    num_row: numpy array
        Index array to be deleted.
    """
    # Delegate to the per-component deleters, which each handle None.
    for remover in (self.delete_X, self.delete_t, self.delete_Z):
        remover(num_row)
+
+
+
def add(self, X=None, t=None, Z=None):
    """Append data to X, t, and Z in place.

    Parameters
    ----------
    X: numpy array
        N x d dimensional matrix. Each row of X denotes the d-dimensional
        feature vector of each search candidate.
    t: numpy array
        N dimensional array. The negative energy of each search candidate
        (value of the objective function to be optimized).
    Z:
        Feature-mapped representation of X.
    """
    # Delegate to the per-component appenders.
    self.add_X(X)
    self.add_t(t)
    self.add_Z(Z)
+
+
+
def delete_X(self, num_row):
    """Delete the rows of X specified by *num_row* (no-op if X is None).

    Parameters
    ----------
    num_row: numpy array
        Index array to be deleted.
    """
    if self.X is None:
        return
    self.X = np.delete(self.X, num_row, 0)
+
+
+
def delete_t(self, num_row):
    """Delete the entries of t specified by *num_row* (no-op if t is None).

    Parameters
    ----------
    num_row: numpy array
        Index array to be deleted.
    """
    if self.t is None:
        return
    self.t = np.delete(self.t, num_row)
+
+
+
def delete_Z(self, num_row):
    """Delete the rows of Z specified by *num_row* (no-op if Z is None).

    Parameters
    ----------
    num_row: numpy array
        Index array to be deleted.
    """
    if self.Z is None:
        return
    self.Z = np.delete(self.Z, num_row, 0)
+
+
+
def add_X(self, X=None):
    """Append rows to X. If self.X is None, self.X is set as X.

    Parameters
    ----------
    X: numpy array
        N x d dimensional matrix. Each row of X denotes the d-dimensional
        feature vector of each search candidate.
    """
    if X is None:
        return
    self.X = X if self.X is None else np.vstack((self.X, X))
+
+
+
def add_t(self, t=None):
    """Append observations to t. If self.t is None, self.t is set as t.

    Scalars are wrapped into a one-element ndarray before appending.

    Bug fix: the None check now happens *before* the ndarray conversion.
    Previously ``add_t(None)`` wrapped None into ``np.array([None])``,
    which passed the subsequent ``is not None`` test and appended a bogus
    ``None`` entry to ``self.t``. Now a None argument is a no-op, matching
    ``add_X`` / ``add_Z``.

    Parameters
    ----------
    t: numpy array or scalar
        N dimensional array. The negative energy of each search candidate
        (value of the objective function to be optimized).
    """
    if t is None:
        return
    if not isinstance(t, np.ndarray):
        # Promote a scalar observation to a length-1 array.
        t = np.array([t])
    if self.t is None:
        self.t = t
    else:
        self.t = np.hstack((self.t, t))
+
+
+
def add_Z(self, Z=None):
    """Append rows to Z. If self.Z is None, self.Z is set as Z.

    Parameters
    ----------
    Z:
        Feature-mapped representation of X.
    """
    if Z is None:
        return
    self.Z = Z if self.Z is None else np.vstack((self.Z, Z))
+
+
+
def save(self, file_name):
    """Save variables X, t, Z to a compressed npz file.

    Parameters
    ----------
    file_name: str
        A file name for saving variables X, t, Z using
        numpy.savez_compressed.
    """
    np.savez_compressed(file_name, X=self.X, t=self.t, Z=self.Z)
+
+
+
def load(self, file_name):
    """Load variables X, t, Z from a npz file.

    Parameters
    ----------
    file_name: str
        A file name for loading variables X, t, Z using numpy.load.
    """
    # allow_pickle is required because components saved as None become
    # object arrays.
    data = np.load(file_name, allow_pickle=True)
    self.X = data["X"]
    self.t = data["t"]
    self.Z = data["Z"]
A. Rahimi and B. Recht, "Random features for large-scale kernel machines,"
+in "Advances in Neural Information Processing Systems," 2007, pp. 1177-1184.
compute the covariance matrix
+:param X: N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
+:type X: numpy.ndarray
+:param Z: N x d dimensional matrix. Each row of Z denotes the d-dimensional feature vector of search candidate.
+:type Z: numpy.ndarray
+:param params: Parameters
+:type params: numpy.ndarray
+:param diag: If X is the diagonalization matrix, true.
+:type diag: bool
max_num_probes (int) -- Maximum number of searching process by Bayesian optimization.
+
num_search_each_probe (int) -- Number of searching by Bayesian optimization at each process.
+
predictor (predictor object) -- Base class is defined in physbo.predictor.
+If None, blm_predictor is defined.
+
is_disp (bool) -- If true, process messages are outputted.
+
simulator (callable) -- Callable (function or object with __call__)
+Here, action is an integer which represents the index of the candidate.
+
score (str) -- The type of acquisition function.
+TS (Thompson Sampling), EI (Expected Improvement) and PI (Probability of Improvement) are available.
+
interval (int) -- The interval number of learning the hyper parameter.
+If you set the negative value to interval, the hyper parameter learning is not performed.
+If you set zero to interval, the hyper parameter learning is performed only at the first step.
+
num_rand_basis (int) -- The number of basis function. If you choose 0, ordinary Gaussian process run.
mode (str) -- The type of acquisition function. TS, EI and PI are available.
+These functions are defined in score.py.
+
actions (array of int) -- actions to calculate score
+
xs (physbo.variable or np.ndarray) -- input parameters to calculate score
+
predictor (predictor object) -- predictor used to calculate score.
+If not given, self.predictor will be used.
+
training (physbo.variable) -- Training dataset.
+If not given, self.training will be used.
+
parallel (bool) -- Calculate scores in parallel by MPI (default: True)
+
alpha (float) -- Tuning parameter which is used if mode = TS.
+In TS, multi variation is tuned as np.random.multivariate_normal(mean, cov*alpha**2, size).
+
+
+
戻り値:
+
f -- Score defined in each mode.
+
+
戻り値の型:
+
float or list of float
+
+
例外:
+
RuntimeError -- If both actions and xs are given
+
+
+
メモ
+
When neither actions nor xs are given, scores for actions not yet searched will be calculated.
+
When parallel is True, it is assumed that the function receives the same input (actions or xs) for all the ranks.
+If you want to split the input array itself, set parallel be False and merge results by yourself.
max_num_probes (int) -- Maximum number of random search process.
+
num_search_each_probe (int) -- Number of search at each random search process.
+
simulator (callable) -- Callable (function or object with __call__) from action to t
+Here, action is an integer which represents the index of the candidate.
+
is_disp (bool) -- If true, process messages are outputted.
Writing history (update history, not output to a file).
+
+
パラメータ:
+
+
action (numpy.ndarray) -- Indexes of actions.
+
t (numpy.ndarray) -- N dimensional array. The negative energy of each search candidate (value of the objective function to be optimized).
+
X (numpy.ndarray) -- N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of each search candidate.
+
time_total (numpy.ndarray) -- N dimenstional array. The total elapsed time in each step.
+If None (default), filled by 0.0.
+
time_update_predictor (numpy.ndarray) -- N dimenstional array. The elapsed time for updating predictor (e.g., learning hyperparemters) in each step.
+If None (default), filled by 0.0.
+
time_get_action (numpy.ndarray) -- N dimenstional array. The elapsed time for getting next action in each step.
+If None (default), filled by 0.0.
+
time_run_simulator (numpy.ndarray) -- N dimenstional array. The elapsed time for running the simulator in each step.
+If None (default), filled by 0.0.
t (numpy.ndarray) -- N dimensional array. The negative energy of each search candidate (value of the objective function to be optimized).
+
action (numpy.ndarray) -- N dimensional array. The indexes of actions of each search candidate.
+
time_total (numpy.ndarray) -- N dimensional array. The total elapsed time in each step.
+If None (default), filled by 0.0.
+
time_update_predictor (numpy.ndarray) -- N dimensional array. The elapsed time for updating predictor (e.g., learning hyperparameters) in each step.
+If None (default), filled by 0.0.
+
time_get_action (numpy.ndarray) -- N dimensional array. The elapsed time for getting next action in each step.
+If None (default), filled by 0.0.
+
time_run_simulator (numpy.ndarray) -- N dimensional array. The elapsed time for running the simulator in each step.
+If None (default), filled by 0.0.
max_num_probes (int) -- Maximum number of searching process by Bayesian optimization.
+
num_search_each_probe (int) -- Number of searching by Bayesian optimization at each process.
+
predictor (predictor object) -- Base class is defined in physbo.predictor.
+If None, blm_predictor is defined.
+
is_disp (bool) -- If true, process messages are outputted.
+
simulator (callable) -- Callable (function or object with __call__)
+Here, action is an integer which represents the index of the candidate.
+
score (str) -- The type of acquisition function.
+TS (Thompson Sampling), EI (Expected Improvement) and PI (Probability of Improvement) are available.
+
interval (int) -- The interval number of learning the hyper parameter.
+If you set the negative value to interval, the hyper parameter learning is not performed.
+If you set zero to interval, the hyper parameter learning is performed only at the first step.
+
num_rand_basis (int) -- The number of basis function. If you choose 0, ordinary Gaussian process run.
mode (str) -- The type of acquisition function. TS, EI and PI are available.
+These functions are defined in score.py.
+
actions (array of int) -- actions to calculate score
+
xs (physbo.variable or np.ndarray) -- input parameters to calculate score
+
predictor (predictor object) -- predictor used to calculate score.
+If not given, self.predictor will be used.
+
training (physbo.variable) -- Training dataset.
+If not given, self.training will be used.
+
parallel (bool) -- Calculate scores in parallel by MPI (default: True)
+
alpha (float) -- Tuning parameter which is used if mode = TS.
+In TS, multi variation is tuned as np.random.multivariate_normal(mean, cov*alpha**2, size).
+
+
+
戻り値:
+
f -- Score defined in each mode.
+
+
戻り値の型:
+
float or list of float
+
+
例外:
+
RuntimeError -- If both actions and xs are given
+
+
+
メモ
+
When neither actions nor xs are given, scores for actions not yet searched will be calculated.
+
When parallel is True, it is assumed that the function receives the same input (actions or xs) for all the ranks.
+If you want to split the input array itself, set parallel be False and merge results by yourself.
max_num_probes (int) -- Maximum number of random search process.
+
num_search_each_probe (int) -- Number of search at each random search process.
+
simulator (callable) -- Callable (function or object with __call__) from action to t
+Here, action is an integer which represents the index of the candidate.
+
is_disp (bool) -- If true, process messages are outputted.
Writing history (update history, not output to a file).
+
+
パラメータ:
+
+
action (numpy.ndarray) -- Indexes of actions.
+
t (numpy.ndarray) -- N dimensional array. The negative energy of each search candidate (value of the objective function to be optimized).
+
X (numpy.ndarray) -- N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of each search candidate.
+
time_total (numpy.ndarray) -- N dimenstional array. The total elapsed time in each step.
+If None (default), filled by 0.0.
+
time_update_predictor (numpy.ndarray) -- N dimenstional array. The elapsed time for updating predictor (e.g., learning hyperparemters) in each step.
+If None (default), filled by 0.0.
+
time_get_action (numpy.ndarray) -- N dimenstional array. The elapsed time for getting next action in each step.
+If None (default), filled by 0.0.
+
time_run_simulator (numpy.ndarray) -- N dimenstional array. The elapsed time for running the simulator in each step.
+If None (default), filled by 0.0.
Calculate scores (acquisition function) for test data.
+
+
パラメータ:
+
+
mode (str) --
Kind of score.
+
"EI", "PI", and "TS" are available.
+
+
predictor (predictor object) -- Base class is defined in physbo.predictor.
+
training (physbo.variable) -- Training dataset.
+If the predictor is not trained, use this for training.
+
test (physbo.variable) -- Inputs
+
fmax (float) -- Max value of mean of posterior probability distribution.
+If not set, the maximum value of posterior mean for training is used.
+Used only for mode == "EI" and "PI"
+
alpha (float) -- noise for sampling source (default: 1.0)
+Used only for mode == "TS"
Reference: (Couckuyt et al., 2014) Fast calculation of multiobjective probability of improvement and expected improvement criteria for Pareto optimization
Calculate Hypervolume-based Probability of Improvement (HVPI).
+
Reference: (Couckuyt et al., 2014) Fast calculation of multiobjective probability of improvement and expected improvement criteria for Pareto optimization
本チュートリアルでは例として、Cuの安定した界面構造の探索問題を扱います。 目的関数の評価にあたる構造緩和計算には、実際には1回あたり数時間といったオーダーの時間を要しますが、本チュートリアルでは既に評価済みの値を使用します。問題設定については、以下の文献を参照してください。 S. Kiyohara, H. Oda, K. Tsuda and T. Mizoguchi, “Acceleration of stable interface structure searching using a kriging approach”, Jpn. J. Appl. Phys. 55, 045502 (2016).
+
データセットファイル s5-210.csv を data ディレクトリ以下に保存し、次のように読み出します。
最初に関数を探索する空間を定義します。 以下の例では、探索空間Xを x_min=-2.0からx_max=2.0までwindow_num=10001分割で刻んだグリッドで定義しています。 なお、Xは window_num x d のndarray形式にする必要があります(dは次元数、この場合は1次元)。そのため、reshapeを行って変形しています。
Van Veldhuizen, David A. Multiobjective evolutionary algorithms: classifications, analyses, and new innovations. No. AFIT/DS/ENG/99-01. AIR FORCE INST OF TECH WRIGHT-PATTERSONAFB OH SCHOOL OF ENGINEERING, 1999.
Couckuyt, Ivo, Dirk Deschrijver, and Tom Dhaene. 「Fast calculation of multiobjective probability of improvement and expected improvement criteria for Pareto optimization.」 Journal of Global Optimization 60.3 (2014): 575-594.
Couckuyt, Ivo, Dirk Deschrijver, and Tom Dhaene. 「Fast calculation of multiobjective probability of improvement and expected improvement criteria for Pareto optimization.」 Journal of Global Optimization 60.3 (2014): 575-594.
Yahyaa, Saba Q., and Bernard Manderick. 「Thompson sampling for multi-objective multi-armed bandits problem.」 Proc. Eur. Symp. Artif. Neural Netw., Comput. Intell. Mach. Learn.. 2015.
その他のチュートリアルと同じように、最初に関数を探索する空間を定義します。 以下の例では、探索空間Xを x_min=-2.0からx_max=2.0までwindow_num=10001分割で刻んだグリッドで定義しています。 なお、Xは window_num x d のndarray形式にする必要があります(dは次元数、この場合は2次元)。そのため、reshapeを行って変形しています。
fx_list = []
x_list = []
# In
# Design of policy
# Declaring the policy by
policy = physbo.search.discrete.policy(test_X=X)
# test_X is the set of candidates which is represented by numpy.array.
# Each row vector represents the feature vector of the corresponding candidate

# set the seed parameter
policy.set_seed(1)

# If you want to perform the initial random search before starting the Bayesian optimization,
# the random sampling is performed by
res = policy.random_search(max_num_probes=50, simulator=simulator())
# Input:
#   max_num_probes: number of random search
#   simulator = simulator
# output: combo.search.discreate.results (class)

# single query Bayesian search
# The single query version of COMBO is performed by
res = policy.bayes_search(
    max_num_probes=150,
    simulator=simulator(),
    score='TS',
    interval=20,
    num_rand_basis=5000,
)
# Input
#   max_num_probes: number of searching by Bayesian optimization
#   simulator: the class of simulator which is defined above
#   score: the type of acquisition function. TS, EI and PI are available
#   interval: the timing for learning the hyper parameter.
#       In this case, the hyper parameter is learned at each 20 steps
#       If you set the negative value to interval, the hyper parameter learning is not performed
#       If you set zero to interval, the hyper parameter learning is performed only at the first step
#   num_rand_basis: the number of basis function. If you choose 0, ordinary Gaussian process runs

# In
best_fx, best_action = res.export_all_sequence_best_fx()

import matplotlib.pyplot as plt

# The result of searching is summarized in the class combo.search.discrete.results.history()
#   res.fx: observed negative energy at each step
#   res.chosen_actions: history of chosen actions
#   fbest, best_action = res.export_all_sequence_best_fx(): current best fx and current best action
#       that has been observed until each step
#   res.total_num_search: total number of search
plt.plot(res.fx[0:res.total_num_search])