Issue/#159 - consistent names for loggers #180

Merged 6 commits on Mar 15, 2017
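
Every change in this PR follows the same pattern: a hard-coded logger name such as "RandomEPM", "RF", "ROAR", or "scenario" is replaced by the fully qualified module-plus-class name, so all SMAC loggers line up with the package hierarchy. A minimal sketch of the idea, using only the standard library logging module and a hypothetical stand-in class (not SMAC code):

import logging

class RandomEPM:
    # Hypothetical stand-in for the SMAC classes touched in this PR.
    def __init__(self):
        # Inside the real package this yields e.g. "smac.epm.random_epm.RandomEPM";
        # in a standalone script it would be "__main__.RandomEPM".
        self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)
        self.logger.debug("logger name: %s", self.logger.name)

# Because names follow the package hierarchy, verbosity can be tuned per subpackage:
logging.basicConfig(level=logging.INFO)
logging.getLogger("smac.epm").setLevel(logging.DEBUG)
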
4 changes: 2 additions & 2 deletions smac/epm/random_epm.py
@@ -22,7 +22,7 @@ def __init__(self, rng):
----------
rng : np.random.RandomState
'''
self.logger = logging.getLogger("RandomEPM")
self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)
self.rng = rng

def train(self, X, Y, **kwargs):
@@ -83,4 +83,4 @@ def predict_marginalized_over_instances(self, X):
Predictive variance
"""

return self.rng.rand(len(X), 1), self.rng.rand(len(X), 1)
return self.rng.rand(len(X), 1), self.rng.rand(len(X), 1)
4 changes: 2 additions & 2 deletions smac/epm/rf_with_instances.py
@@ -87,7 +87,7 @@ def __init__(self, types,
min_samples_leaf, max_depth, eps_purity, seed]
self.seed = seed

self.logger = logging.getLogger("RF")
self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)

# Never use a lower variance than this
self.var_threshold = 10 ** -5
@@ -137,7 +137,7 @@ def predict(self, X):
(self.types.shape[0], X.shape[1]))

means, vars = self.rf.batch_predictions(X)

return means.reshape((-1, 1)), vars.reshape((-1, 1))

def predict_marginalized_over_instances(self, X):
6 changes: 3 additions & 3 deletions smac/epm/rfr_imputator.py
@@ -36,14 +36,14 @@ def __init__(self, rs, cutoff, threshold,
highest possible values (e.g. cutoff * par)
model:
epm model (i.e. RandomForestWithInstances)
change_threshold : float
change_threshold : float
stop imputation if change is less than this
max_iter : maximum number of iteration
max_iter : maximum number of iteration
-------
"""

super(RFRImputator, self).__init__()
self.logger = logging.getLogger("RFRImputor")
self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)
self.max_iter = max_iter
self.change_threshold = change_threshold
self.cutoff = cutoff
2 changes: 1 addition & 1 deletion smac/facade/func_facade.py
@@ -79,7 +79,7 @@ def fmin_smac(func: callable,
scenario = Scenario(scenario_dict)

smac = SMAC(scenario=scenario, tae_runner=ta, rng=rng)
smac.logger = logging.getLogger("fmin_smac")
smac.logger = logging.getLogger(smac.__module__ + "." + smac.__class__.__name__)
incumbent = smac.optimize()

config_id = smac.solver.runhistory.config_ids[incumbent]
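
One change differs slightly from the rest: fmin_smac reassigns the logger on an already-constructed SMAC object rather than inside a constructor. Since __module__ and __class__ are looked up on the class, the resulting name is derived from the module that defines SMAC, not from func_facade. A small illustrative sketch (the class here is a hypothetical stand-in):

import logging

class SMAC:
    # Hypothetical stand-in; in SMAC the class lives in its own facade module.
    pass

smac = SMAC()
# Attribute lookup on the instance falls back to the class, so this resolves to
# "<module defining SMAC>.SMAC" no matter where the call happens.
smac.logger = logging.getLogger(smac.__module__ + "." + smac.__class__.__name__)
print(smac.logger.name)  # e.g. "__main__.SMAC" when run as a script
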
6 changes: 3 additions & 3 deletions smac/facade/roar_facade.py
@@ -50,14 +50,14 @@ def __init__(self,
initial_design: InitialDesign
initial sampling design
initial_configurations: typing.List[Configuration]
list of initial configurations for initial design --
list of initial configurations for initial design --
cannot be used together with initial_design
stats: Stats
optional stats object
rng: np.random.RandomState
Random number generator
'''
self.logger = logging.getLogger("ROAR")
self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)

# initial random number generator
num_run, rng = self._get_rng(rng=rng)
@@ -74,7 +74,7 @@ def __init__(self,
(scenario=scenario, num_params=num_params,
success_states=[StatusType.SUCCESS, ],
impute_censored_data=False, impute_state=None)

# use SMAC facade
super().__init__(
scenario=scenario,
10 changes: 5 additions & 5 deletions smac/facade/smac_facade.py
@@ -83,14 +83,14 @@ def __init__(self,
initial_design: InitialDesign
initial sampling design
initial_configurations: typing.List[Configuration]
list of initial configurations for initial design --
list of initial configurations for initial design --
cannot be used together with initial_design
stats: Stats
optional stats object
rng: np.random.RandomState
Random number generator
'''
self.logger = logging.getLogger("SMAC")
self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)

aggregate_func = average_cost

@@ -188,15 +188,15 @@ def __init__(self,
if initial_design is not None and initial_configurations is not None:
raise ValueError(
"Either use initial_design or initial_configurations; but not both")

if initial_configurations is not None:
initial_design = MultiConfigInitialDesign(tae_runner=tae_runner,
scenario=scenario,
stats=self.stats,
traj_logger=traj_logger,
runhistory=runhistory,
rng=rng,
configs=initial_configurations,
configs=initial_configurations,
intensifier=intensifier,
aggregate_func=aggregate_func)
elif initial_design is None:
@@ -268,7 +268,7 @@ def __init__(self,

def _get_rng(self, rng):
'''
initial random number generator
initial random number generator

Arguments
---------
8 changes: 4 additions & 4 deletions smac/initial_design/default_configuration_design.py
@@ -14,8 +14,8 @@


class DefaultConfiguration(SingleConfigInitialDesign):
def __init__(self,

def __init__(self,
tae_runner: ExecuteTARun,
scenario: Scenario,
stats: Stats,
@@ -24,7 +24,7 @@ def __init__(self,
):
'''
Constructor

Arguments
---------
tae_runner: ExecuteTARun
@@ -39,7 +39,7 @@ def __init__(self,
rng: np.random.RandomState
random state
'''
super().__init__(tae_runner=tae_runner,
super().__init__(tae_runner=tae_runner,
scenario=scenario,
stats=stats,
traj_logger=traj_logger,
12 changes: 6 additions & 6 deletions smac/initial_design/initial_design.py
@@ -14,8 +14,8 @@


class InitialDesign(object):
def __init__(self,

def __init__(self,
tae_runner: ExecuteTARun,
scenario: Scenario,
stats: Stats,
@@ -24,7 +24,7 @@ def __init__(self,
):
'''
Constructor

Arguments
---------
tae_runner: ExecuteTARun
@@ -38,18 +38,18 @@ def __init__(self,
rng: np.random.RandomState
random state
'''

self.tae_runner = tae_runner
self.scenario = scenario
self.stats = stats
self.traj_logger = traj_logger
self.rng = rng
self.logger = logging.getLogger("InitialDesign")
self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)

def run(self):
'''
as an initial design: it simply runs the default configuration on random pair of instance and random seed

Returns
-------
incumbent: Configuration()
2 changes: 1 addition & 1 deletion smac/initial_design/multi_config_initial_design.py
@@ -91,7 +91,7 @@ def run(self) -> Configuration:
if len(set(configs)) > 1:
# intensify will skip all challenger that are identical with the incumbent;
# if <configs> has only identical configurations,
# intensifiy will not do any configuration runs
# intensifiy will not do any configuration runs
# (also not on the incumbent)
# therefore, at least two different configurations have to be in <configs>
inc, inc_perf = self.intensifier.intensify(challengers=set(configs[1:]),
16 changes: 8 additions & 8 deletions smac/intensification/intensification.py
@@ -37,7 +37,7 @@ def __init__(self, tae_runner, stats, traj_logger, rng, instances,
tae_runner : tae.executre_ta_run_*.ExecuteTARun* Object
target algorithm run executor
stats: Stats()
stats object
stats object
traj_logger: TrajLogger()
TrajLogger object to log all new incumbents
rng : np.random.RandomState
@@ -68,7 +68,7 @@ def __init__(self, tae_runner, stats, traj_logger, rng, instances,
self.instance_specifics = {}
else:
self.instance_specifics = instance_specifics
self.logger = logging.getLogger("intensifier")
self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)
self.run_limit = run_limit
self.maxR = maxR
self.minR = minR
@@ -118,7 +118,7 @@ def intensify(self, challengers: typing.List[Configuration],
incumbent: Configuration()
current (maybe new) incumbent configuration
inc_perf: float
empirical performance of incumbent configuration
empirical performance of incumbent configuration
'''

self.start_time = time.time()
@@ -339,7 +339,7 @@ def _adapt_cutoff(self, challenger: Configuration,
run_history: RunHistory,
inc_sum_cost: float):
'''
adaptive capping:
adaptive capping:
compute cutoff based on time so far used for incumbent
and reduce cutoff for next run of challenger accordingly

@@ -380,19 +380,19 @@ def _adapt_cutoff(self, challenger: Configuration,
)
return cutoff

def _compare_configs(self, incumbent: Configuration,
challenger: Configuration,
def _compare_configs(self, incumbent: Configuration,
challenger: Configuration,
run_history: RunHistory,
aggregate_func: typing.Callable):
'''
compare two configuration wrt the runhistory
compare two configuration wrt the runhistory
and return the one which performs better (or None if the decision is not safe)

Decision strategy to return x as being better than y:
1. x has at least as many runs as y
2. x performs better than y on the intersection of runs on x and y

Implicit assumption:
Implicit assumption:
challenger was evaluated on the same instance-seed pairs as incumbent

Parameters
6 changes: 3 additions & 3 deletions smac/runhistory/runhistory.py
@@ -158,7 +158,7 @@ def compute_all_costs(self, instances: typing.List[str]=None):

def incremental_update_cost(self, config: Configuration, cost: float):
'''
incrementally updates the performance of a configuration by using a moving average;
incrementally updates the performance of a configuration by using a moving average;

Arguments
--------
@@ -213,7 +213,7 @@ def empty(self):

Returns
----------
bool: True if runs have been added to the RunHistory,
bool: True if runs have been added to the RunHistory,
False otherwise
"""
return len(self.data) == 0
@@ -309,7 +309,7 @@ def update(self, runhistory, external_data:bool=False):
runhistory: RunHistory
runhistory with additional data to be added to self
external_data: bool
if True, run will not be added to self._configid_to_inst_seed
if True, run will not be added to self._configid_to_inst_seed
and not available through get_runs_for_config()
"""

3 changes: 1 addition & 2 deletions smac/runhistory/runhistory2epm.py
@@ -61,7 +61,7 @@ def __init__(self, scenario, num_params,
rs : numpy.random.RandomState
only used for reshuffling data after imputation
'''
self.logger = logging.getLogger("runhistory2epm")
self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)

# General arguments
self.scenario = scenario
@@ -95,7 +95,6 @@ def __init__(self, scenario, num_params,
self.instance_features = scenario.feature_dict
self.n_feats = scenario.n_features

self.logger = logging.getLogger("runhistory2epm")
self.num_params = num_params

# Sanity checks
10 changes: 5 additions & 5 deletions smac/scenario/scenario.py
@@ -44,7 +44,7 @@ def __init__(self, scenario, cmd_args=None):
command line arguments that were not processed by argparse

"""
self.logger = logging.getLogger("scenario")
self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)
self.PCA_DIM = 7

self.in_reader = InputReader()
@@ -72,7 +72,7 @@ def __init__(self, scenario, cmd_args=None):
arg_name, arg_value = self._parse_argument(key, scenario, **value)
parsed_arguments[arg_name] = arg_value


if len(scenario) != 0:
raise ValueError('Could not parse the following arguments: %s' %
str(list(scenario.keys())))
@@ -183,7 +183,7 @@ def _parse_argument(self, name, scenario, help, callback=None, default=None,
normalized_key = key.lower().replace('-', '').replace('_', '')
if normalized_key == normalized_name:
value = scenario.pop(key)

if dest is None:
dest = name.lower().replace('-', '_')

@@ -311,7 +311,7 @@ def extract_instance_specific(instance_list):
self.feature_array.append(self.feature_dict[inst_])
self.feature_array = numpy.array(self.feature_array)
self.n_features = self.feature_array.shape[1]

# reduce dimensionality of features of larger than PCA_DIM
if self.feature_array.shape[1] > self.PCA_DIM:
X = self.feature_array
@@ -352,4 +352,4 @@ def __getstate__(self):

def __setstate__(self, d):
self.__dict__.update(d)
self.logger = logging.getLogger("scenario")
self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)
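
The scenario diff also touches __setstate__, so a Scenario restored from a pickle gets a freshly created logger under the new module-qualified name. Below is a minimal sketch of that pattern; it assumes the logger is dropped before pickling (a common convention, the corresponding __getstate__ body is not shown in this diff):

import logging
import pickle

class Scenario:
    # Hypothetical, pared-down stand-in for smac/scenario/scenario.py.
    def __init__(self):
        self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)

    def __getstate__(self):
        # Assumption for this sketch: exclude the logger from the pickled state.
        d = dict(self.__dict__)
        d.pop("logger", None)
        return d

    def __setstate__(self, d):
        self.__dict__.update(d)
        # Recreate the logger with the module-qualified name after unpickling.
        self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)

restored = pickle.loads(pickle.dumps(Scenario()))
assert restored.logger.name.endswith("Scenario")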