README.md: 23 changes (12 additions, 11 deletions)
@@ -1,25 +1,26 @@
-Summary
-=======
+# Summary

This repository contains some basic code for Bayesian optimization

-Installation
-============

-Install the current development version of scikit-learn (or sklearn version 0.18 once this is available)
+## Installation

-git clone git@github.com:scikit-learn/scikit-learn.git
-cd sklearn
-sudo python setup.py install
+Install dependencies

+1. Install [nlopt](https://github.com/stevengj/nlopt) for your Python version.
+2. Install dependencies with pip: `[sudo] pip[3] install -r requirements.txt`
+(requirements.txt can be found in the repository)
+3. (Optional:) install [BOLeRo](https://github.com/rock-learning/bolero)

Install `bayesian_optimization`

-git clone git@git.hb.dfki.de:jmetzen/bayesian_optimization.git
+git clone https://github.com/rock-learning/bayesian_optimization.git
cd bayesian_optimization
sudo python setup.py install


-Usage
-=====
+## Usage

Some usage examples are contained in the folder "examples". To reproduce the results from the ICML 2016 paper
"Minimum Regret Search for Single- and Multi-Task Optimization", please execute the jupyter notebook "examples/mrs_evaluation.ipynb."

bayesian_optimization/__init__.py: 13 changes (7 additions, 6 deletions)
@@ -1,6 +1,7 @@
-from bayesian_optimization import (BayesianOptimizer, REMBOOptimizer,
-InterleavedREMBOOptimizer)
-from model import GaussianProcessModel
-from acquisition_functions import (ProbabilityOfImprovement,
-ExpectedImprovement, UpperConfidenceBound, GPUpperConfidenceBound,
-EntropySearch, MinimalRegretSearch, create_acquisition_function)
+from .bayesian_optimization import (BayesianOptimizer, REMBOOptimizer,
+InterleavedREMBOOptimizer)
+from .model import GaussianProcessModel
+from .acquisition_functions import (
+ProbabilityOfImprovement, ExpectedImprovement, UpperConfidenceBound,
+GPUpperConfidenceBound, EntropySearch, MinimalRegretSearch,
+create_acquisition_function)
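Note: the leading dots make these explicit relative imports. Python 3 removed implicit relative imports, so `from model import GaussianProcessModel` no longer resolves against the package there; only the dotted form does. A minimal sketch of the idiom, using a hypothetical package `mypkg` (not part of this repository) purely for illustration:

```python
# mypkg/__init__.py -- hypothetical package, shown only to illustrate the idiom.
# The leading dot resolves the module relative to this package instead of
# relying on Python 2's implicit relative imports (removed in Python 3).
from .model import GaussianProcessModel

__all__ = ["GaussianProcessModel"]
```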
bayesian_optimization/acquisition_functions.py: 3 changes (0 additions, 3 deletions)
@@ -5,9 +5,6 @@
from scipy.stats import norm, entropy
from scipy.special import erf

-from sklearn.cluster import KMeans
-from sklearn.neighbors import NearestNeighbors


class AcquisitionFunction(object):
""" Abstract base class for acquisition functions."""
bayesian_optimization/bayesian_optimization.py: 4 changes (2 additions, 2 deletions)
@@ -333,7 +333,7 @@ def __init__(self, interleaved_runs=2, *args, **kwargs):
*args, **kwargs)
for run in range(interleaved_runs)]
self.rembos = cycle(self.rembos)
-self.current_rembo = self.rembos.next()
+self.current_rembo = next(self.rembos)

self.X_ = []
self.y_ = []
@@ -361,4 +361,4 @@ def update(self, X, y):
self.y_.append(y)

self.current_rembo.update(X, y)
-self.current_rembo = self.rembos.next()
+self.current_rembo = next(self.rembos)
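Note: Python 3 iterators have no `.next()` method; the built-in `next()` is the portable way to advance them (it also exists on Python 2.6+). A small sketch of the round-robin pattern used here, with `itertools.cycle` standing in for `self.rembos` and plain strings standing in for the REMBO optimizers:

```python
from itertools import cycle

# cycle() yields its inputs forever, so repeatedly calling next() hands out
# the optimizers in round-robin order -- the interleaving pattern in the diff.
rembos = cycle(["rembo-0", "rembo-1"])  # placeholder objects for illustration
assert next(rembos) == "rembo-0"
assert next(rembos) == "rembo-1"
assert next(rembos) == "rembo-0"  # wraps around
```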
bolero_bayes_opt/examples/plot_bops_optimizer_catapult.py: 6 changes (3 additions, 3 deletions)
@@ -17,13 +17,13 @@


kernel = C(100.0, (1.0, 10000.0)) \
-* Matern(l=(1.0, 1.0), l_bounds=[(0.1, 100), (0.1, 100)])
+* Matern(length_scale=(1.0, 1.0), length_scale_bounds=[(0.1, 100), (0.1, 100)])

opt = BOPSOptimizer(
boundaries=[(5, 10), (0, np.pi/2)], bo_type="bo",
acquisition_function="UCB", acq_fct_kwargs=dict(kappa=2.5),
optimizer="direct+lbfgs", maxf=100,
-gp_kwargs=dict(kernel=kernel, normalize_y=True, sigma_squared_n=1e-5))
+gp_kwargs=dict(kernel=kernel, normalize_y=True, alpha=1e-5))


target = 4.0 # Fixed target
@@ -42,7 +42,7 @@

# Determine reward landscape
v = np.linspace(5.0, 10.0, 100)
-theta = np.linspace(0.0, np.pi/2, 100)
+theta = np.linspace(0.0, np.pi / 2, 100)
V, Theta = np.meshgrid(v, theta)

Z = np.array([[partial(catapult._compute_reward, context=context)
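Note: the renamed keyword arguments follow the scikit-learn 0.18 Gaussian process API: `Matern` takes `length_scale`/`length_scale_bounds` instead of `l`/`l_bounds`, and the noise term added to the kernel diagonal is called `alpha` in `GaussianProcessRegressor`. A standalone sketch of the same kernel and noise settings against scikit-learn directly, with made-up toy data and without the BOPSOptimizer wiring:

```python
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import ConstantKernel as C, Matern

# Anisotropic Matern kernel: one length scale (and one bound) per input
# dimension, matching the two-dimensional (velocity, angle) search space.
kernel = C(100.0, (1.0, 10000.0)) \
    * Matern(length_scale=(1.0, 1.0), length_scale_bounds=[(0.1, 100), (0.1, 100)])

# alpha is added to the diagonal of the kernel matrix and plays the role of
# the observation-noise variance (the quantity formerly named sigma_squared_n).
gp = GaussianProcessRegressor(kernel=kernel, normalize_y=True, alpha=1e-5)

X = np.array([[5.0, 0.1], [7.5, 0.8], [10.0, 1.4]])  # toy (velocity, angle) points
y = np.array([1.0, 3.0, 2.0])                        # toy rewards
gp.fit(X, y)
mean, std = gp.predict(np.array([[8.0, 1.0]]), return_std=True)
```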
bolero_bayes_opt/optimizer/bocps.py: 4 changes (2 additions, 2 deletions)
@@ -84,7 +84,7 @@ def __init__(self, boundaries, bo_type="bo", acquisition_function="UCB",
self.boundaries = boundaries
self.bo_type = bo_type
self.value_transform = value_transform
-if isinstance(self.value_transform, basestring):
+if isinstance(self.value_transform, str):
self.value_transform = eval(self.value_transform)
self.optimizer = optimizer

@@ -171,7 +171,7 @@ def set_evaluation_feedback(self, feedbacks):
self.policy_fitted = False

def best_policy(self, maxfun=15000, variance=0.01,
training=["model-free", "model_based"]):
training=["model-free", "model-based"]):
"""Returns the best (greedy) policy learned so far.

Parameters
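Note: `basestring` exists only on Python 2; on Python 3 all text is `str`, so the isinstance check has to name `str` directly (a Python-2-compatible variant would check against both). A tiny sketch of the pattern, with a hypothetical helper that, like the surrounding code, accepts the transform either as a callable or as a string expression:

```python
# Hypothetical helper mirroring the value_transform handling above: normalise
# a callable-or-string argument to a callable. As in the original code, eval()
# is used and therefore trusts its input.
def make_value_transform(value_transform):
    if isinstance(value_transform, str):  # was `basestring` on Python 2
        value_transform = eval(value_transform)
    return value_transform

double = make_value_transform("lambda x: 2 * x")
assert double(3) == 6
assert make_value_transform(abs)(-4) == 4
```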
examples/rosenbrock_bayesian_optimization.ipynb: 159 changes (63 additions, 96 deletions)

Large diffs are not rendered by default.

requirements.txt: 2 changes (2 additions, 0 deletions)
@@ -0,0 +1,2 @@
+scikit-learn
+scipy