Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Feature logging #293

Open
wants to merge 19 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 18 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,7 @@ classifiers = [
dependencies = [
"numpy>=1.14.5",
"scipy>=1.5.0",
"click==8.1.3",
]

[project.optional-dependencies]
Expand Down
79 changes: 49 additions & 30 deletions src/spotpy/algorithms/_algorithm.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@

import numpy as np

from spotpy import database, parameter
from spotpy import database, parameter, spotpylogging

try:
from queue import Queue
Expand All @@ -36,15 +36,18 @@ class _RunStatistic(object):

def __init__(self, repetitions, algorithm_name, optimization_direction, parnames):
self.optimization_direction = optimization_direction  # grid, maximize, minimize
print(
"Initializing the ", algorithm_name, " with ", repetitions, " repetitions"

self.logger = spotpylogging.get_logger("RunStatistic(%s)" % algorithm_name)

self.logger.info(
"Initializing the %s with %s repetitions", algorithm_name, repetitions
)
if optimization_direction == "minimize":
self.compare = self.minimizer
print("The objective function will be minimized")
self.logger.info("The objective function will be minimized")
if optimization_direction == "maximize":
self.compare = self.maximizer
print("The objective function will be maximized")
self.logger.info("The objective function will be maximized")
if optimization_direction == "grid":
self.compare = self.grid

Expand Down Expand Up @@ -131,46 +134,54 @@ def print_status(self):
timestr,
)

print(text)
self.logger.info(text)
self.last_print = time.time()

def print_status_final(self):
print("\n*** Final SPOTPY summary ***")
print(
"Total Duration: "
+ str(round((time.time() - self.starttime), 2))
+ " seconds"
self.logger.info("")
self.logger.info("*** Final SPOTPY summary ***")
self.logger.info(
"Total Duration: %s seconds" % str(round((time.time() - self.starttime), 2))
)
print("Total Repetitions:", self.rep)
self.logger.info("Total Repetitions: %s", self.rep)

if self.optimization_direction == "minimize":
print("Minimal objective value: %g" % (self.objectivefunction_min))
print("Corresponding parameter setting:")
self.logger.info(
"Minimal objective value: %g" % (self.objectivefunction_min)
)
self.logger.info("Corresponding parameter setting:")
for i in range(self.parameters):
text = "%s: %g" % (self.parnames[i], self.params_min[i])
print(text)
self.logger.info(text)

if self.optimization_direction == "maximize":
print("Maximal objective value: %g" % (self.objectivefunction_max))
print("Corresponding parameter setting:")
self.logger.info(
"Maximal objective value: %g" % (self.objectivefunction_max)
)
self.logger.info("Corresponding parameter setting:")
for i in range(self.parameters):
text = "%s: %g" % (self.parnames[i], self.params_max[i])
print(text)
self.logger.info(text)

if self.optimization_direction == "grid":
print("Minimal objective value: %g" % (self.objectivefunction_min))
print("Corresponding parameter setting:")
self.logger.info(
"Minimal objective value: %g" % (self.objectivefunction_min)
)
self.logger.info("Corresponding parameter setting:")
for i in range(self.parameters):
text = "%s: %g" % (self.parnames[i], self.params_min[i])
print(text)
self.logger.info(text)

print("Maximal objective value: %g" % (self.objectivefunction_max))
print("Corresponding parameter setting:")
self.logger.info(
"Maximal objective value: %g" % (self.objectivefunction_max)
)
self.logger.info("Corresponding parameter setting:")
for i in range(self.parameters):
text = "%s: %g" % (self.parnames[i], self.params_max[i])
print(text)
self.logger.info(text)

print("******************************\n")
self.logger.info("******************************")
self.logger.info("")

def __repr__(self):
return "Min objectivefunction: %g \n Max objectivefunction: %g" % (
Expand Down Expand Up @@ -241,8 +252,16 @@ def __init__(
random_state=None,
optimization_direction="grid",
algorithm_name="",
quiet=False,
logfile=None,
logdir=None,
):

# Instantiate logging
self.logger = spotpylogging.instantiate_logger(
self.__class__.__name__, quiet, logfile, logdir
)

# Initialize the user defined setup class
self.setup = spot_setup
param_info = parameter.get_parameters_array(
Expand Down Expand Up @@ -292,11 +311,11 @@ def __init__(
self._return_all_likes = False # allows multi-objective calibration if set to True, is set by the algorithm

if breakpoint == "read" or breakpoint == "readandwrite":
print("Reading backupfile")
self.logger.info("Reading backupfile")
try:
open(self.dbname + ".break")
except FileNotFoundError:
print("Backupfile not found")
self.logger.info("Backupfile not found")
self.dbappend = True

# Now a repeater (ForEach-object) is loaded
Expand Down Expand Up @@ -370,7 +389,7 @@ def final_call(self):

def _init_database(self, like, randompar, simulations):
if self.dbinit:
print("Initialize database...")
self.logger.info("Initialize database...")

self.datawriter = database.get_datawriter(
self.dbformat,
Expand Down Expand Up @@ -494,15 +513,15 @@ def getfitness(self, simulation, params):
Calls the user defined spot_setup objectivefunction
"""
try:
# print('Using parameters in fitness function')
# self.logger.info('Using parameters in fitness function')
return self.setup.objectivefunction(
evaluation=self.evaluation,
simulation=simulation,
params=(params, self.parnames),
)

except TypeError: # Happens if the user does not allow to pass parameter in the spot_setup.objectivefunction
# print('Not using parameters in fitness function')
# self.logger.info('Not using parameters in fitness function')
return self.setup.objectivefunction(
evaluation=self.evaluation, simulation=simulation
)
Expand Down
24 changes: 12 additions & 12 deletions src/spotpy/algorithms/abc.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,8 +78,8 @@ def sample(
sets the limit
"""
self.set_repetiton(repetitions)
print(
"Starting the ABC algotrithm with " + str(repetitions) + " repetitions..."
self.logger.info(
"Starting the ABC algorithm with %s repetitions...", repetitions
)
# Initialize ABC parameters:
randompar = self.parameter()["random"]
Expand All @@ -106,7 +106,7 @@ def sample(
work.append([like, randompar, like, randompar, c, p])
icall += 1
if self.status.stop:
print("Stopping sampling")
self.logger.debug("Stopping sampling")
break

while icall < repetitions and gnrng > peps:
Expand Down Expand Up @@ -141,7 +141,7 @@ def sample(
work[rep][4] = work[rep][4] + 1
icall += 1
if self.status.stop:
print("Stopping samplig")
self.logger.debug("Stopping sampling")
break # Probability distribution for roulette wheel selection
bn = []
for i, val in enumerate(work):
Expand Down Expand Up @@ -191,7 +191,7 @@ def sample(
work[rep][4] = work[rep][4] + 1
icall += 1
if self.status.stop:
print("Stopping samplig")
self.logger.debug("Stopping sampling")
break
# Scout bee phase
for i, val in enumerate(work):
Expand All @@ -205,18 +205,18 @@ def sample(
work[i][0] = clike
icall += 1
if self.status.stop:
print("Stopping samplig")
self.logger.debug("Stopping sampling")
break
gnrng = -self.status.objectivefunction_max
if icall >= repetitions:
print("*** OPTIMIZATION SEARCH TERMINATED BECAUSE THE LIMIT")
print("ON THE MAXIMUM NUMBER OF TRIALS ")
print(repetitions)
print("HAS BEEN EXCEEDED.")
self.logger.info("*** OPTIMIZATION SEARCH TERMINATED BECAUSE THE LIMIT")
self.logger.info("ON THE MAXIMUM NUMBER OF TRIALS ")
self.logger.info(repetitions)
self.logger.info("HAS BEEN EXCEEDED.")
Comment on lines +212 to +215
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Shouldn't this be a warning?


if gnrng < peps:
print(
self.logger.info(
"THE POPULATION HAS CONVERGED TO A PRESPECIFIED SMALL PARAMETER SPACE AT RUN"
)
print(icall)
self.logger.info(icall)
self.final_call()
24 changes: 10 additions & 14 deletions src/spotpy/algorithms/dds.py
Original file line number Diff line number Diff line change
Expand Up @@ -281,8 +281,8 @@ def sample(self, repetitions, trials=1, x_initial=np.array([])):
self.parameter()["minbound"],
self.parameter()["maxbound"],
)
print(
"Starting the DDS algotrithm with " + str(repetitions) + " repetitions..."
self.logger.info(
"Starting the DDS algorithm with %s repetitions...", repetitions
)

number_of_parameters = (
Expand Down Expand Up @@ -334,12 +334,10 @@ def sample(self, repetitions, trials=1, x_initial=np.array([])):
self.params_max = list(x_curr)
self.params_max = self.fix_status_params_format(self.params_max)

print(
"Best solution found has obj function value of "
+ str(objectivefunction_max)
+ " at "
+ str(repitionno_best)
+ "\n\n"
self.logger.debug(
"Best solution found has obj function value of %s at %s\n\n",
objectivefunction_max,
repitionno_best,
)
debug_results.append(
{
Expand Down Expand Up @@ -370,12 +368,10 @@ def calc_initial_para_configuration(
# by trying which randomized generated input matches best
# initial_iterations is the number of function evaluations to initialize the DDS algorithm solution
if initial_iterations > 1:
print(
"Finding best starting point for trial "
+ str(trial + 1)
+ " using "
+ str(initial_iterations)
+ " random samples."
self.logger.debug(
"Finding best starting point for trial %s using %s random samples.",
trial + 1,
initial_iterations,
)
repetions_left = (
repetitions - initial_iterations
Expand Down
26 changes: 16 additions & 10 deletions src/spotpy/algorithms/demcz.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,8 @@

import numpy as np

from spotpy import spotpylogging

from . import _algorithm


Expand Down Expand Up @@ -104,7 +106,9 @@ def check_par_validity(self, par):
if par[i] > self.max_bound[i]:
par[i] = self.max_bound[i]
else:
print("ERROR Bounds have not the same lenghts as Parameterarray")
self.logger.error(
"ERROR Bounds do not have the same lengths as the parameter array"
)
return par

def sample(
Expand Down Expand Up @@ -143,8 +147,8 @@ def sample(
"""

self.set_repetiton(repetitions)
print(
"Starting the DEMCz algotrithm with " + str(repetitions) + " repetitions..."
self.logger.info(
"Starting the DEMCz algorithm with %s repetitions...", repetitions
)

self.min_bound, self.max_bound = (
Expand Down Expand Up @@ -220,9 +224,9 @@ def sample(
# 3) and we have not done more than the maximum number of iterations

while cur_iter < maxChainDraws:
print(cur_iter, burnIn)
self.logger.debug("%s, %s", cur_iter, burnIn)
if cur_iter == burnIn:
print("starting")
self.logger.debug("starting")
history.start_sampling()

# every5th iteration allow a big jump
Expand Down Expand Up @@ -349,7 +353,7 @@ def sample(
covConvergence.update(history, "interest")
if all(grConvergence.R < convergenceCriteria):
cur_iter = maxChainDraws
print(
self.logger.info(
"All chains fullfil the convergence criteria. Sampling stopped."
)
cur_iter += 1
Expand All @@ -362,8 +366,8 @@ def sample(
self.iter = cur_iter
self.burnIn = burnIn
self.R = grConvergence.R
text = "Gelman Rubin R=" + str(self.R)
print(text)

self.logger.info("Gelman Rubin R=%s", self.R)
self.status.rep = self.status.repetitions
self.final_call()

Expand Down Expand Up @@ -561,9 +565,11 @@ def rv(relevantHistory):

try:
projection = np.dot(np.linalg.inv(basis1), basis2)
except np.linalg.linalg.LinAlgError:
except np.linalg.linalg.LinAlgError as e:
projection = np.array(basis1) * np.nan
print("Exception happend!")
spotpylogging.get_logger("_CovarianceConvergence()").debug(
    "Exception happened!\nException: %s", e
)
)

# find the releative size in each of the basis1 directions
return np.log(np.sum(projection**2, axis=0) ** 0.5)
Expand Down
Loading