Skip to content
Snippets Groups Projects
Commit cd517748 authored by Franz Scherr's avatar Franz Scherr
Browse files

RBF approximation of the objective function serves as "caching" of some results

parent bb0f08bf
No related branches found
No related tags found
No related merge requests found
import os
import warnings
import logging.config
import yaml
from pypet import Environment
from pypet import pypetconstants
from ltl.optimizees.functions.optimizee import FunctionGeneratorOptimizee
from ltl.optimizees.surrogate.optimizee import SurrogateOptimizee
from ltl.optimizers.crossentropy.optimizer import CrossEntropyOptimizer, CrossEntropyParameters
from ltl.paths import Paths
from ltl.optimizers.crossentropy.distribution import NoisyGaussian, BayesianGaussianMixture, Gaussian,\
NoisyBayesianGaussianMixture
# Silence third-party warnings that would otherwise clutter the optimization output
warnings.filterwarnings("ignore")
# Module-level logger; handlers and levels are configured in main() from bin/logging.yaml
logger = logging.getLogger('ltl-fun-ce')
def main():
    """Run a cross-entropy optimization of a benchmark function through a
    GP surrogate.

    Sets up logging from ``bin/logging.yaml``, creates a pypet
    :class:`Environment`, wraps a :class:`FunctionGeneratorOptimizee` in a
    :class:`SurrogateOptimizee` and drives it with a
    :class:`CrossEntropyOptimizer`.
    """
    name = 'LTL-FUN-CE'
    # CHANGE THIS to the directory where your simulation results are contained
    root_dir_path = '/home/scherr/simulations'
    if root_dir_path is None:
        # Explicit raise instead of assert: asserts are stripped under `python -O`.
        raise ValueError(
            "You have not set the root path to store your results."
            " Set it manually in the code (by setting the variable 'root_dir_path')"
            " before running the simulation")
    paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)

    with open("bin/logging.yaml") as f:
        # safe_load: the logging config is plain YAML; yaml.load() without an
        # explicit Loader is unsafe and raises a TypeError in PyYAML >= 6.
        l_dict = yaml.safe_load(f)
    log_output_file = os.path.join(paths.results_path,
                                   l_dict['handlers']['file']['filename'])
    l_dict['handlers']['file']['filename'] = log_output_file
    logging.config.dictConfig(l_dict)

    print("All output can be found in file ", log_output_file)
    print("Change the values in logging.yaml to control log level and destination")
    print("e.g. change the handler to console for the loggers you're interested in to get output to stdout")

    traj_file = os.path.join(paths.output_dir_path, 'data.h5')

    # Create an environment that handles running our simulation
    # This initializes a PyPet environment
    env = Environment(trajectory=name, filename=traj_file, file_title='{} data'.format(name),
                      comment='{} data'.format(name),
                      add_time=True,
                      freeze_input=True,
                      multiproc=True,
                      use_scoop=True,
                      wrap_mode=pypetconstants.WRAP_MODE_LOCAL,
                      automatic_storing=True,
                      log_stdout=False,  # Sends stdout to logs
                      log_folder=os.path.join(paths.output_dir_path, 'logs')
                      )

    # Get the trajectory from the environment
    traj = env.trajectory

    # NOTE: Innerloop simulator
    from ltl.optimizees.functions.function_generator import FunctionGenerator, \
        GaussianParameters, LangermannParameters
    fg_instance = FunctionGenerator([LangermannParameters('default', 'default')],
                                    dims=2, bound=[0, 2])
    optimizee = FunctionGeneratorOptimizee(traj, fg_instance)
    # Wrap the (potentially expensive) objective in a GP surrogate that can
    # answer confident queries without running the real simulation.
    surrogate = SurrogateOptimizee(traj, optimizee)

    # NOTE: Outerloop optimizer initialization
    parameters = CrossEntropyParameters(pop_size=50, rho=0.2, smoothing=0.0, temp_decay=0, n_iteration=30,
                                        distribution=NoisyBayesianGaussianMixture(2, additive_noise=[1, 1],
                                                                                  noise_decay=0.99))
    optimizer = CrossEntropyOptimizer(traj, optimizee_create_individual=optimizee.create_individual,
                                      optimizee_fitness_weights=(1,),
                                      parameters=parameters,
                                      optimizee_bounding_func=optimizee.bounding_func)

    # Add post processing
    env.add_postprocessing(optimizer.post_process)

    # Run the simulation with all parameter combinations (via the surrogate)
    env.run(surrogate.simulate)

    # NOTE: Innerloop optimizee end
    optimizee.end()
    # NOTE: Outerloop optimizer end
    optimizer.end()

    # Finally disable logging and close all log-files
    env.disable_logging()
if __name__ == '__main__':
    # Drop into an ipdb post-mortem shell on uncaught exceptions when the
    # (debug-only) ipdb package is available; otherwise run main() normally
    # instead of crashing with an ImportError.
    try:
        import ipdb
    except ImportError:
        main()
    else:
        with ipdb.launch_ipdb_on_exception():
            main()
import numpy as np
import sklearn.gaussian_process as gp
from ltl.optimizees.optimizee import Optimizee
from ltl import dict_to_list
class SurrogateOptimizee(Optimizee):
    """Surrogate objective for optimizing computationally hard objectives.

    Wraps another optimizee and learns a Gaussian-process model (one GP per
    fitness dimension) of its fitness landscape with scikit-learn. Once
    enough samples have been collected and the model is confident at a query
    point, the GP prediction is returned instead of running the expensive
    simulation — effectively "caching" results.
    """

    def __init__(self, traj, optimizee, noise_level=0.0):
        """
        :param traj: pypet trajectory (kept for interface compatibility; unused here)
        :param optimizee: wrapped optimizee whose ``simulate`` is expensive
        :param noise_level: assumed observation noise, modeled via a WhiteKernel
        """
        self.optimizee = optimizee
        self.individuals = []  # observed parameter vectors (1-D arrays)
        self.results = []      # corresponding fitness tuples/arrays
        self.n_samples = 0
        self.min_samples = 20  # do not trust the model before this many samples
        self.kernel = gp.kernels.RBF(length_scale=0.1) + \
            gp.kernels.WhiteKernel(noise_level=noise_level)
        self.noise_level = noise_level
        self.fitted = False
        self.gaussian_processes = []  # one regressor per fitness dimension
        individual = optimizee.create_individual()
        _, self.individual_dict_spec = dict_to_list(individual, get_dict_spec=True)

    def create_individual(self):
        """Delegates to the wrapped optimizee."""
        return self.optimizee.create_individual()

    def bounding_func(self, individual):
        """Delegates to the wrapped optimizee."""
        return self.optimizee.bounding_func(individual)

    def _traj_individual_to_dict(self, traj):
        """Transform the current traj individual into the corresponding dict.

        :param traj: Trajectory containing the individual
        :return: A dict representing the individual
        """
        individual = dict()
        for key in self.individual_dict_spec:
            key = key[0]
            individual[key] = traj.individual[key]
        return individual

    def simulate(self, traj):
        """Return a GP approximation if confident, else run the real simulation.

        :param ~pypet.trajectory.Trajectory traj: Trajectory
        :return: a single element :obj:`tuple` containing the value of the chosen function
        """
        individual = np.array(dict_to_list(self._traj_individual_to_dict(traj)))
        if self.n_samples > self.min_samples and self.fitted:
            # scikit-learn expects a 2-D (n_queries, n_features) array;
            # passing the raw 1-D vector raises a ValueError in modern sklearn.
            query = individual.reshape(1, -1)
            means = []
            stds = []
            for gaussian_process in self.gaussian_processes:
                y, std = gaussian_process.predict(query, return_std=True)
                means.append(float(y[0]))
                stds.append(float(std[0]))
            if max(stds) < 0.2 + self.noise_level:
                # Model is confident at this point -> skip the expensive run.
                print('approximation hit')
                traj.f_add_result('$set.$.approximated', True)
                return tuple(means)
        # Run the real (expensive) simulation and refit the model with it.
        simulation_result = self.optimizee.simulate(traj)
        x = np.array(simulation_result)
        self.individuals.append(individual)
        self.results.append(x)
        X = np.array(self.individuals)
        Y = np.array(self.results)
        for i in range(len(x)):
            if len(self.gaussian_processes) <= i:
                # Lazily create one regressor per fitness dimension.
                gpr = gp.GaussianProcessRegressor(kernel=self.kernel)
                self.gaussian_processes.append(gpr)
            else:
                gpr = self.gaussian_processes[i]
            # Each GP models a single fitness dimension (column i). The
            # original code fitted every GP on the whole result matrix,
            # which made the per-dimension regressors redundant and broke
            # scalar extraction of the predictions.
            gpr.fit(X, Y[:, i])
        self.fitted = True
        self.n_samples += 1
        return simulation_result
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment