# default configurations
seed: 2

# Model related parameters
# [exact, gp_prior, deepgp, dspp, heteroskedastic, ngd_model, ensemble_exactgp, fbgp_mcmc, fbgp_mcmc_gpytorch]
model_type: 'gp_prior'
set_prior: false
outputs: 1  # multiple output models [exact, heteroskedastic]
inducing_points: 100 # NB: This is not used. The number of inducing points is equal to the number of data points
hidden_dim: 1 # Hidden dimension (I think); how many hidden GPs to create outputs for
n_samples: 10 # In DeepGP w/ ELBO(-like) loss

# Initial data set
initial_samples: 1000
space_filling_design: 'lhs'  # ['uniform' (equidistant), 'random', 'lhs', 'sobol']
test_samples: 1089  # alternative: 2178

# Data
al_type: 'population_based'  # [pool_based, psuedo_population_based, population_based]
simulator: 'hartmann6d'  # None, otherwise specify a simulator
# simulators: [homo1, motorcycle, gramacy1d, gramacy2d, higdon1d, homo1, homo2, branin2d, ishigami3d]
dataset: 'None'  # None, otherwise specify a dataset: [autompg, mercury3d, mercury3d-12]
path_train_data: ''
path_test_data: ''


# Transformation of data: [standardize, min_max_feature_scaling, minusone_one_feature_scaling, identity]
transformation_x: 'min_max_feature_scaling'
transformation_y: 'standardize'

# How to query the next point? [variance, cross_corr, combi, random, GSx, GSy, iGS, iGSr, cohns, qbc, qbc_emoc, mi]
# [mcmc_mean_variance, mcmc_qbc, mcmc_gmm, mcmc_bald]
selection_criteria: 'random'
min_change_in_var: -1 # relative change (set to -1 when not using)

# Parameters for the active learning scheme
plot: true
active_learning_steps: 1
k_samples: 1        # Number of unique points to query (should be equal to outputs, if one for each task)
beta_sampling: -1   # Distance between the points in the batch given by percentage of range. NB: This also affect initial sampling..
repeat_sampling: 1  # Number of times to sample for each input,
                    # i.e. a value of 4 will give four simulations for each unique data point

# Hyperparameters for optimizing hyperparameters of GP
milestones: [300, 600]        # Epochs at which we will lower the learning rate by a factor (non-variational optimizer)
initial_lr: 0.1               # Initial learning rate
n_epochs: 1000                # Number of epochs in optimizing the hyperparameters
n_runs: 1                     # Number of runs to try to find the global minimum

# Settings for MCMC
num_chains: 5
num_samples: 300  # 300
warmup_steps: 200  # 200
predict_mcmc: 'mode'  # [mode, posterior, moments]

# Output file
output_file: 'example.pkl'
