diff --git a/docs/getting_started.md b/docs/getting_started.md index 54a4c67d..946e7f5d 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -14,7 +14,7 @@ The example comes along with parameter boundaries, the Rosenbrock function, the So we can directly start to analyse the Rosenbrock function with one of the algorithms. We start with a simple Monte Carlo sampling: # Give Monte Carlo algorithm the example setup and saves results in a RosenMC.csv file - sampler = spotpy.algorithms.mc(spotpy_setup(), dbname='RosenMC', dbformat='csv') + sampler = spotpy.algorithms.mc(spot_setup(), dbname='RosenMC', dbformat='csv') Now we can sample with the implemented Monte Carlo algorithm: @@ -35,7 +35,7 @@ We can see that the parameters *x* and *y*, which drive the the Rosenbrock funct If you want to see the best 10% of your samples, which is called posterior parameter distribution, you have to do something like this: - posterior=spotpy.analyser.get_posterior(results,percentage=10) + posterior=spotpy.analyser.get_posterior(results, percentage=10) spotpy.analyser.plot_parameterInteraction(posterior) This should give you a parameter interaction plot of your best 10% samples, which should look like Fig. 2: diff --git a/spotpy/__init__.py b/spotpy/__init__.py index 7a298068..3a3cf857 100644 --- a/spotpy/__init__.py +++ b/spotpy/__init__.py @@ -39,4 +39,4 @@ from . import describe # Contains some helper functions to describe smaplers and setups from .hydrology import signatures # Quantifies goodness of fit between simulation and evaluation data with hydrological signatures -__version__ = '1.4.6' \ No newline at end of file +__version__ = '1.5.0' \ No newline at end of file diff --git a/spotpy/algorithms/_algorithm.py b/spotpy/algorithms/_algorithm.py index d2d0bcfa..544d7613 100644 --- a/spotpy/algorithms/_algorithm.py +++ b/spotpy/algorithms/_algorithm.py @@ -10,12 +10,13 @@ from __future__ import division from __future__ import print_function from __future__ import unicode_literals -from spotpy import database, objectivefunctions +from spotpy import database from spotpy import parameter import numpy as np import time import threading + try: from queue import Queue except ImportError: @@ -36,37 +37,64 @@ class _RunStatistic(object): Usage: status = _RunStatistic() status(rep,like,params) - """ - def __init__(self): + def __init__(self, repetitions, algorithm_name, optimization_direction, parnames): + self.optimization_direction = optimization_direction #grid, maximize, minimize + print('Initializing the ',algorithm_name,' with ',repetitions,' repetitions') + if optimization_direction == 'minimize': + self.compare = self.minimizer + print('The objective function will be minimized') + if optimization_direction == 'maximize': + self.compare = self.maximizer + print('The objective function will be maximized') + if optimization_direction == 'grid': + self.compare = self.grid + self.rep = 0 - self.params = None - self.objectivefunction = -1e308 - self.bestrep = 0 + self.parnames = parnames + self.parameters = len(parnames) + self.params_min = [np.nan]*self.parameters + self.params_max = [np.nan]*self.parameters + self.objectivefunction_min = 1e308 + self.objectivefunction_max = -1e308 self.starttime = time.time() self.last_print = time.time() - self.repetitions = None + self.repetitions = repetitions self.stop = False + + def minimizer(self, objval, params): + if objval < self.objectivefunction_min: + self.objectivefunction_min = objval + self.params_min = list(params) + def maximizer(self, objval, 
params): + if objval > self.objectivefunction_max: + self.objectivefunction_max = objval + self.params_max = list(params) + + def grid(self, objval, params): + if objval < self.objectivefunction_min: + self.objectivefunction_min = objval + self.params_min = list(params) + if objval > self.objectivefunction_max: + self.objectivefunction_max = objval + self.params_max = list(params) + + def __call__(self, objectivefunction, params, block_print=False): - self.curparmeterset = params self.rep+=1 - if type(objectivefunction) == type([]): - if objectivefunction[0] > self.objectivefunction: - # Show only the first best objectivefunction when working with - # more than one objectivefunction - self.objectivefunction = objectivefunction[0] - self.params = params - self.bestrep = self.rep + if type(objectivefunction) == type([]): #TODO: change to iterable + self.compare(objectivefunction[0], params) + else: - if objectivefunction > self.objectivefunction: - self.params = params - self.objectivefunction = objectivefunction - self.bestrep = self.rep + self.compare(objectivefunction, params) + + if self.rep == self.repetitions: self.stop = True + if not block_print: self.print_status() @@ -77,14 +105,59 @@ def print_status(self): if acttime - self.last_print >= 2: avg_time_per_run = (acttime - self.starttime) / (self.rep + 1) timestr = time.strftime("%H:%M:%S", time.gmtime(round(avg_time_per_run * (self.repetitions - (self.rep + 1))))) - - text = '%i of %i (best like=%g) est. time remaining: %s' % (self.rep, self.repetitions, - self.objectivefunction, timestr) + if self.optimization_direction == 'minimize': + text = '%i of %i, minimal objective function=%g, time remaining: %s' % ( + self.rep, self.repetitions, self.objectivefunction_min, timestr) + + if self.optimization_direction == 'maximize': + text = '%i of %i, maximal objective function=%g, time remaining: %s' % ( + self.rep, self.repetitions, self.objectivefunction_max, timestr) + + if self.optimization_direction == 'grid': + text = '%i of %i, min objf=%g, max objf=%g, time remaining: %s' % ( + self.rep, self.repetitions, self.objectivefunction_min, self.objectivefunction_max, timestr) + print(text) self.last_print = time.time() + + def print_status_final(self): + print('\n*** Final SPOTPY summary ***') + print('Total Duration: ' + str(round((time.time() - self.starttime), 2)) + ' seconds') + print('Total Repetitions:', self.rep) + + if self.optimization_direction == 'minimize': + print('Minimal objective value: %g' % (self.objectivefunction_min)) + print('Corresponding parameter setting:') + for i in range(self.parameters): + text = '%s: %g' % (self.parnames[i], self.params_min[i]) + print(text) + + if self.optimization_direction == 'maximize': + print('Maximal objective value: %g' % (self.objectivefunction_max)) + print('Corresponding parameter setting:') + for i in range(self.parameters): + text = '%s: %g' % (self.parnames[i], self.params_max[i]) + print(text) + + if self.optimization_direction == 'grid': + print('Minimal objective value: %g' % (self.objectivefunction_min)) + print('Corresponding parameter setting:') + for i in range(self.parameters): + text = '%s: %g' % (self.parnames[i], self.params_min[i]) + print(text) + + print('Maximal objective value: %g' % (self.objectivefunction_max)) + print('Corresponding parameter setting:') + for i in range(self.parameters): + text = '%s: %g' % (self.parnames[i], self.params_max[i]) + print(text) + + print('******************************\n') + def __repr__(self): - return 'Best objectivefunction: 
%g' % self.objectivefunction + return 'Min objectivefunction: %g \n Max objectivefunction: %g' % ( + self.objectivefunction_min, self.objectivefunction_max) class _algorithm(object): @@ -122,12 +195,6 @@ class _algorithm(object): db_precision:np.float type set np.float16, np.float32 or np.float64 for rounding of floats in the output database Default is np.float16 - alt_objfun: str or None, default: 'rmse' - alternative objectivefunction to be used for algorithm - * None: the objfun defined in spot_setup.objectivefunction is used - * any str: if str is found in spotpy.objectivefunctions, - this objectivefunction is used, else falls back to None - e.g.: 'log_p', 'rmse', 'bias', 'kge' etc. sim_timeout: float, int or None, default: None the defined model given in the spot_setup class can be controlled to break after 'sim_timeout' seconds if sim_timeout is not None. @@ -139,18 +206,12 @@ class _algorithm(object): _unaccepted_parameter_types = (parameter.List, ) def __init__(self, spot_setup, dbname=None, dbformat=None, dbinit=True, - dbappend=False, parallel='seq', save_sim=True, alt_objfun=None, - breakpoint=None, backup_every_rep=100, save_threshold=-np.inf, - db_precision=np.float16, sim_timeout=None, random_state=None): + dbappend=False, parallel='seq', save_sim=True, breakpoint=None, + backup_every_rep=100, save_threshold=-np.inf, db_precision=np.float16, + sim_timeout=None, random_state=None, optimization_direction='grid', algorithm_name=''): + # Initialize the user defined setup class self.setup = spot_setup - # Philipp: Changed from Tobi's version, now we are using both new class defined parameters - # as well as the parameters function. The new method get_parameters - # can deal with a missing parameters function - # - # For me (Philipp) it is totally unclear why all the samplers should call this function - # again and again instead of - # TODO: just storing a definite list of parameter objects here param_info = parameter.get_parameters_array(self.setup, unaccepted_parameter_types=self._unaccepted_parameter_types) self.all_params = param_info['random'] self.constant_positions = parameter.get_constant_indices(spot_setup) @@ -163,16 +224,13 @@ def __init__(self, spot_setup, dbname=None, dbformat=None, dbinit=True, self.non_constant_positions = np.arange(0,len(self.all_params)) self.parameter = self.get_parameters self.parnames = param_info['name'] - + self.algorithm_name = algorithm_name # Create a type to hold the parameter values using a namedtuple self.partype = parameter.ParameterSet(param_info) - # use alt_objfun if alt_objfun is defined in objectivefunctions, - # else self.setup.objectivefunction - self.objectivefunction = getattr( - objectivefunctions, alt_objfun or '', None) or self.setup.objectivefunction self.evaluation = self.setup.evaluation() self.save_sim = save_sim + self.optimization_direction = optimization_direction self.dbname = dbname or 'customDb' self.dbformat = dbformat or 'ram' self.db_precision = db_precision @@ -228,7 +286,6 @@ def __init__(self, spot_setup, dbname=None, dbformat=None, dbinit=True, # the normal work on the chains self.repeat = ForEach(self.simulate) - self.status = _RunStatistic() def __str__(self): return '{type}({mtype}())->{dbname}'.format( @@ -247,9 +304,8 @@ def get_parameters(self): return pars[self.non_constant_positions] def set_repetiton(self, repetitions): - - self.status.repetitions = repetitions - + self.status = _RunStatistic(repetitions, self.algorithm_name, + self.optimization_direction, self.parnames) # In MPI, this command will 
do nothing on the master process # but the worker processes are going to wait for jobs. # Hence the workers will only receive parameters for the @@ -262,13 +318,8 @@ def final_call(self): self.datawriter.finalize() except AttributeError: # Happens if no database was assigned pass - print('End of sampling') - text = 'Best run at %i of %i (best like=%g) with parameter set:' % ( - self.status.bestrep, self.status.repetitions, self.status.objectivefunction) - print(text) - print(self.status.params) - text = 'Duration:' + str(round((time.time() - self.status.starttime), 2)) + ' s' - print(text) + self.status.print_status_final() + def _init_database(self, like, randompar, simulations): if self.dbinit: @@ -332,10 +383,10 @@ def postprocessing(self, rep, params, simulation, chains=1, save_run=True, negat # Save everything in the database, if save is True # This is needed as some algorithms just want to know the fitness, # before they actually save the run in a database (e.g. sce-ua) + self.status(like,params,block_print=block_print) if save_run is True and simulation is not None: - self.save(like, params, simulations=simulation, chains=chains) if type(like)==type([]): return like[0] @@ -349,11 +400,11 @@ def getfitness(self, simulation, params): """ try: #print('Using parameters in fitness function') - return self.objectivefunction(evaluation=self.evaluation, simulation=simulation, params = (params,self.parnames)) + return self.setup.objectivefunction(evaluation=self.evaluation, simulation=simulation, params = (params,self.parnames)) except TypeError: # Happens if the user does not allow to pass parameter in the spot_setup.objectivefunction #print('Not using parameters in fitness function') - return self.objectivefunction(evaluation=self.evaluation, simulation=simulation) + return self.setup.objectivefunction(evaluation=self.evaluation, simulation=simulation) def simulate(self, id_params_tuple): """This is a simple wrapper of the model, returning the result together with diff --git a/spotpy/algorithms/abc.py b/spotpy/algorithms/abc.py index 60e81bdb..be0d4fba 100644 --- a/spotpy/algorithms/abc.py +++ b/spotpy/algorithms/abc.py @@ -16,7 +16,7 @@ class abc(_algorithm): """ - This class holds the Artificial Bee Colony(ABC) algorithm, based on Karaboga (2007). + This class holds the Artificial Bee Colony (ABC) algorithm, based on Karaboga (2007). D. Karaboga, AN IDEA BASED ON HONEY BEE SWARM FOR NUMERICAL OPTIMIZATION,TECHNICAL REPORT-TR06, Erciyes University, Engineering Faculty, Computer Engineering Department 2005. D. Karaboga, B. 
Basturk, A powerful and Efficient Algorithm for Numerical Function Optimization: Artificial Bee Colony (ABC) Algorithm, Journal of Global Optimization, Volume:39, Issue:3,pp:459-171, November 2007,ISSN:0925-5001 , doi: 10.1007/s10898-007-9149-x @@ -54,7 +54,8 @@ def __init__(self, *args, **kwargs): * True: Simulation results will be saved * False: Simulation results will not be saved """ - + kwargs['optimization_direction'] = 'maximize' + kwargs['algorithm_name'] = 'Artificial Bee Colony (ABC) algorithm' super(abc, self).__init__(*args, **kwargs) @@ -196,7 +197,7 @@ def sample(self, repetitions, eb=48, a=(1 / 10), peps=0.0001, ownlimit=False, li if self.status.stop: print('Stopping samplig') break - gnrng = -self.status.objectivefunction + gnrng = -self.status.objectivefunction_max if icall >= repetitions: print('*** OPTIMIZATION SEARCH TERMINATED BECAUSE THE LIMIT') print('ON THE MAXIMUM NUMBER OF TRIALS ') diff --git a/spotpy/algorithms/dds.py b/spotpy/algorithms/dds.py index c9f0663c..11c488cc 100644 --- a/spotpy/algorithms/dds.py +++ b/spotpy/algorithms/dds.py @@ -125,7 +125,7 @@ def neigh_value_discrete(self, s, s_min, s_max, r): s_new = sample + 1 return s_new - def neigh_value_mixed(self, x_curr, r, j): + def neigh_value_mixed(self, x_curr, r, j, x_min, x_max): """ :param x_curr: @@ -135,8 +135,7 @@ def neigh_value_mixed(self, x_curr, r, j): :return: """ s = x_curr[j] - x_min = x_curr.minbound[j] - x_max = x_curr.maxbound[j] + if not x_curr.as_int[j]: return self.neigh_value_continuous(s, x_min, x_max, r) else: @@ -207,12 +206,13 @@ def __init__(self, *args, **kwargs): self.r = kwargs.pop("r") except KeyError: self.r = 0.2 # default value - + kwargs['optimization_direction'] = 'maximize' + kwargs['algorithm_name'] = 'Dynamically Dimensioned Search (DDS) algorithm' super(dds, self).__init__(*args, **kwargs) self.np_random = np.random - self.status.params = ParameterSet(self.parameter()) + #self.status.params_max = ParameterSet(self.parameter()) # self.generator_repetitions will be set in `sample` and is needed to generate a # generator which sends back actual parameter s_test @@ -230,7 +230,7 @@ def get_next_x_curr(self): """ # We need to shift position and length of the sampling process for rep in range(self.generator_repetitions): - yield rep, self.calculate_next_s_test(self.status.params, rep, self.generator_repetitions, self.r) + yield rep, self.calculate_next_s_test(self.params_max, rep, self.generator_repetitions, self.r) def sample(self, repetitions, trials=1, x_initial=np.array([])): """ @@ -271,9 +271,11 @@ def sample(self, repetitions, trials=1, x_initial=np.array([])): debug_results = [] self.set_repetiton(repetitions) + self.min_bound, self.max_bound = self.parameter( + )['minbound'], self.parameter()['maxbound'] print('Starting the DDS algotrithm with '+str(repetitions)+ ' repetitions...') - number_of_parameters = len(self.status.params) # number_of_parameters is the amount of parameters + number_of_parameters = self.status.parameters # number_of_parameters is the amount of parameters if len(x_initial) == 0: initial_iterations = np.int(np.max([5, round(0.005 * repetitions)])) @@ -282,44 +284,51 @@ def sample(self, repetitions, trials=1, x_initial=np.array([])): else: initial_iterations = 1 x_initial = np.array(x_initial) - if not (np.all(x_initial <= self.status.params.maxbound) and np.all( - x_initial >= self.status.params.minbound)): + if not (np.all(x_initial <= self.max_bound) and np.all( + x_initial >= self.min_bound)): raise ValueError("User specified 'x_initial' 
but the values are not within the parameter range") # Users can define trial runs in within "repetition" times the algorithm will be executed for trial in range(trials): - self.status.objectivefunction = -1e308 + #objectivefunction_max = -1e308 + params_max = x_initial # repitionno_best saves on which iteration the best parameter configuration has been found repitionno_best = initial_iterations # needed to initialize variable and avoid code failure when small # iterations - repetions_left = self.calc_initial_para_configuration(initial_iterations, trial, + params_max, repetions_left, objectivefunction_max = self.calc_initial_para_configuration(initial_iterations, trial, repetitions, x_initial) - self.fix_status_params_format() - trial_best_value = self.status.params.copy() - + params_max = self.fix_status_params_format(params_max) + trial_best_value = list(params_max)#self.status.params_max.copy() + # important to set this field `generator_repetitions` so that # method `get_next_s_test` can generate exact parameters self.generator_repetitions = repetions_left - + self.params_max = params_max for rep, x_curr, simulations in self.repeat(self.get_next_x_curr()): - self.postprocessing(rep, x_curr, simulations, chains=trial) - self.fix_status_params_format() - print('Best solution found has obj function value of ' + str(self.status.objectivefunction) + ' at ' + like = self.postprocessing(rep, x_curr, simulations, chains=trial) + if like > objectivefunction_max: + objectivefunction_max = like + self.params_max = list(x_curr) + self.params_max = self.fix_status_params_format(self.params_max) + + print('Best solution found has obj function value of ' + str(objectivefunction_max) + ' at ' + str(repitionno_best) + '\n\n') - debug_results.append({"sbest": self.status.params, "trial_initial": trial_best_value,"objfunc_val": self.status.objectivefunction}) + debug_results.append({"sbest": self.params_max, "trial_initial": trial_best_value,"objfunc_val": objectivefunction_max}) self.final_call() return debug_results - def fix_status_params_format(self): + def fix_status_params_format(self, params_max): start_params = ParameterSet(self.parameter()) - start_params.set_by_array([j for j in self.status.params]) - self.status.params = start_params + start_params.set_by_array([j for j in params_max]) + return start_params def calc_initial_para_configuration(self, initial_iterations, trial, repetitions, x_initial): - max_bound, min_bound = self.status.params.maxbound, self.status.params.minbound - parameter_bound_range = max_bound - min_bound + #max_bound, min_bound = self.status.params_max.maxbound, self.status.params_max.minbound + parameter_bound_range = self.max_bound - self.min_bound number_of_parameters = len(parameter_bound_range) - discrete_flag = self.status.params.as_int + discrete_flag = ParameterSet(self.parameter()).as_int + params_max = x_initial + objectivefunction_max = -1e308 # Calculate the initial Solution, if `initial_iterations` > 1 otherwise the user defined a own one. 
# If we need to find an initial solution we iterating initial_iterations times to warm um the algorithm # by trying which randomized generated input matches best @@ -332,23 +341,28 @@ def calc_initial_para_configuration(self, initial_iterations, trial, repetitions raise ValueError('# Initialization samples >= Max # function evaluations.') starting_generator = ( - (rep, [self.np_random.randint(np.int(min_bound[j]), np.int(max_bound[j]) + 1) if - discrete_flag[j] else min_bound[j] + parameter_bound_range[j] * self.np_random.rand() + (rep, [self.np_random.randint(np.int(self.min_bound[j]), np.int(self.max_bound[j]) + 1) if + discrete_flag[j] else self.min_bound[j] + parameter_bound_range[j] * self.np_random.rand() for j in range(number_of_parameters)]) for rep in range(int(initial_iterations))) for rep, x_curr, simulations in self.repeat(starting_generator): - self.postprocessing(rep, x_curr, simulations) # get obj function value + like = self.postprocessing(rep, x_curr, simulations) # get obj function value # status setting update - self.fix_status_params_format() + if like > objectivefunction_max: + objectivefunction_max = like + params_max = list(x_curr) + params_max = self.fix_status_params_format(params_max) else: # now initial_iterations=1, using a user supplied initial solution. Calculate obj func value. repetions_left = repetitions - 1 # use this to reduce number of fevals in DDS loop rep, x_test_param, simulations = self.simulate((0, x_initial)) # get from the inputs - self.postprocessing(rep, x_test_param, simulations) - self.fix_status_params_format() - - return repetions_left + like = self.postprocessing(rep, x_test_param, simulations) + if like > objectivefunction_max: + objectivefunction_max = like + params_max = list(x_test_param) + params_max = self.fix_status_params_format(params_max) + return params_max, repetions_left, objectivefunction_max def calculate_next_s_test(self, previous_x_curr, rep, rep_limit, r): """ @@ -378,12 +392,12 @@ for j in range(amount_params): if randompar[j] < probability_neighborhood: # then j th DV selected to vary in neighbour dvn_count = dvn_count + 1 - new_value = self.dds_generator.neigh_value_mixed(previous_x_curr, r, j) + new_value = self.dds_generator.neigh_value_mixed(previous_x_curr, r, j, self.min_bound[j],self.max_bound[j]) new_x_curr[j] = new_value # change relevant dec var value in x_curr if dvn_count == 0: # no DVs selected at random, so select ONE dec_var = np.int(np.ceil(amount_params * self.np_random.rand())) - new_value = self.dds_generator.neigh_value_mixed(previous_x_curr, r, dec_var - 1) + new_value = self.dds_generator.neigh_value_mixed(previous_x_curr, r, dec_var - 1, self.min_bound[dec_var - 1], self.max_bound[dec_var - 1]) new_x_curr[dec_var - 1] = new_value # change relevant decision variable value in s_test diff --git a/spotpy/algorithms/demcz.py b/spotpy/algorithms/demcz.py index 127db435..4baef532 100644 --- a/spotpy/algorithms/demcz.py +++ b/spotpy/algorithms/demcz.py @@ -93,16 +93,9 @@ def __init__(self, *args, **kwargs): save_sim: boolean * True: Simulation results will be saved * False: Simulationt results will not be saved - - alt_objfun: str or None, default: 'log_p' - alternative objectivefunction to be used for algorithm - * None: the objfun defined in spot_setup.objectivefunction is used - * any str: if str is found in spotpy.objectivefunctions, - this objectivefunction is used, else falls back to None - e.g.: 'log_p', 'rmse', 'bias', 'kge' etc. 
""" - if 'alt_objfun' not in kwargs: - kwargs['alt_objfun'] = 'log_p' + kwargs['optimization_direction'] = 'maximize' + kwargs['algorithm_name'] = 'Differential Evolution Markov Chain (DE-MC) algorithm' super(demcz, self).__init__(*args, **kwargs) def check_par_validity(self, par): diff --git a/spotpy/algorithms/dream.py b/spotpy/algorithms/dream.py index ac117b2b..055dcc65 100644 --- a/spotpy/algorithms/dream.py +++ b/spotpy/algorithms/dream.py @@ -55,9 +55,8 @@ def __init__(self, *args, **kwargs): * False: Simulation results will not be saved """ - - if 'alt_objfun' not in kwargs: - kwargs['alt_objfun'] = 'log_p' + kwargs['optimization_direction'] = 'maximize' + kwargs['algorithm_name'] = 'DiffeRential Evolution Adaptive Metropolis (DREAM) algorithm' super(dream, self).__init__(*args, **kwargs) def check_par_validity_bound(self, par): @@ -124,9 +123,9 @@ def get_new_proposal_vector(self,cur_chain,newN,nrN): random_chain1,random_chain2 = self.get_other_random_chains(cur_chain) new_parameterset=[] #position = self.chain_samples-1#self.nChains*self.chain_samples+self.chain_samples+cur_chain-1 - cur_par_set = self.bestpar[cur_chain][self.nChainruns[cur_chain]-1] - random_par_set1 = self.bestpar[random_chain1][self.nChainruns[random_chain1]-1] - random_par_set2 = self.bestpar[random_chain2][self.nChainruns[random_chain2]-1] + cur_par_set = list(self.bestpar[cur_chain][self.nChainruns[cur_chain]-1]) + random_par_set1 = list(self.bestpar[random_chain1][self.nChainruns[random_chain1]-1]) + random_par_set2 = list(self.bestpar[random_chain2][self.nChainruns[random_chain2]-1]) for i in range(self.N):#Go through parameters @@ -144,9 +143,9 @@ def get_new_proposal_vector(self,cur_chain,newN,nrN): # return new_par def update_mcmc_status(self,par,like,sim,cur_chain): - self.bestpar[cur_chain][self.nChainruns[cur_chain]]=par + self.bestpar[cur_chain][self.nChainruns[cur_chain]]=list(par) self.bestlike[cur_chain]=like - self.bestsim[cur_chain]=sim + self.bestsim[cur_chain]=list(sim) def get_r_hat(self, parameter_array): """ diff --git a/spotpy/algorithms/fast.py b/spotpy/algorithms/fast.py index 21c70fc1..b23967e9 100644 --- a/spotpy/algorithms/fast.py +++ b/spotpy/algorithms/fast.py @@ -61,12 +61,10 @@ def __init__(self, *args, **kwargs): save_sim: boolean *True: Simulation results will be saved - *False: Simulationt results will not be saved + *False: Simulation results will not be saved ''' + kwargs['algorithm_name'] = 'Fourier Amplitude Sensitivity Test (FAST)' super(fast, self).__init__(*args, **kwargs) -# _algorithm.__init__(self, spot_setup, dbname=dbname, -# dbformat=dbformat, parallel=parallel, save_sim=save_sim, -# save_threshold=save_threshold) def scale_samples(self, params, bounds): ''' diff --git a/spotpy/algorithms/fscabc.py b/spotpy/algorithms/fscabc.py index 19f48dc6..daaa2f7f 100644 --- a/spotpy/algorithms/fscabc.py +++ b/spotpy/algorithms/fscabc.py @@ -16,7 +16,7 @@ class fscabc(_algorithm): """ - This class holds the Fitness Scaled Chaotic Artificial Bee Colony(FSCABC) algorithm, + This class holds the Fitness Scaled Chaotic Artificial Bee Colony (FSCABC) algorithm, based on: Yudong Zhang, Lenan Wu, and Shuihua Wang (2011). 
Magnetic Resonance Brain Image @@ -60,7 +60,8 @@ def __init__(self, *args, **kwargs): * True: Simulation results will be saved * False: Simulation results will not be saved """ - + kwargs['optimization_direction'] = 'maximize' + kwargs['algorithm_name'] = 'Fitness Scaled Chaotic Artificial Bee Colony (FSCABC) algorithm' super(fscabc, self).__init__(*args, **kwargs) def mutate(self, r): @@ -235,7 +236,7 @@ def sample(self, repetitions, eb=48, a=(1 / 10), peps=0.0001, kpow=5, ownlimit=F if self.status.stop: print('Stopping samplig') break - gnrng = -self.status.objectivefunction + gnrng = -self.status.objectivefunction_max if self.breakpoint == 'write' or self.breakpoint == 'readandwrite'\ and icall >= lastbackup+self.backup_every_rep: diff --git a/spotpy/algorithms/lhs.py b/spotpy/algorithms/lhs.py index 3e79bd09..d9ac0535 100644 --- a/spotpy/algorithms/lhs.py +++ b/spotpy/algorithms/lhs.py @@ -51,7 +51,7 @@ def __init__(self, *args, **kwargs): * True: Simulation results will be saved * False: Simulation results will not be saved """ - + kwargs['algorithm_name'] = 'Latin Hypercube Sampling (LHS)' super(lhs, self).__init__(*args, **kwargs) def sample(self, repetitions): diff --git a/spotpy/algorithms/list_sampler.py b/spotpy/algorithms/list_sampler.py index e54e1cfc..3138ea43 100644 --- a/spotpy/algorithms/list_sampler.py +++ b/spotpy/algorithms/list_sampler.py @@ -43,7 +43,7 @@ def __init__(self, *args, **kwargs): * True: Simulation results will be saved * False: Simulation results will not be saved """ - + kwargs['algorithm_name'] = 'List Sampler' super(list_sampler, self).__init__(*args, **kwargs) def sample(self, repetitions=None): diff --git a/spotpy/algorithms/mc.py b/spotpy/algorithms/mc.py index fbcf36a2..a22e853b 100644 --- a/spotpy/algorithms/mc.py +++ b/spotpy/algorithms/mc.py @@ -49,7 +49,7 @@ def __init__(self, *args, **kwargs): * True: Simulation results will be saved * False: Simulation results will not be saved """ - + kwargs['algorithm_name'] = 'Monte Carlo (MC) sampler' super(mc, self).__init__(*args, **kwargs) def sample(self, repetitions): diff --git a/spotpy/algorithms/mcmc.py b/spotpy/algorithms/mcmc.py index 8e5306a8..7682f68f 100644 --- a/spotpy/algorithms/mcmc.py +++ b/spotpy/algorithms/mcmc.py @@ -53,8 +53,8 @@ def __init__(self, *args, **kwargs): * True: Simulation results will be saved * False: Simulation results will not be saved """ - if 'alt_objfun' not in kwargs: - kwargs['alt_objfun'] = 'log_p' + kwargs['optimization_direction'] = 'maximize' + kwargs['algorithm_name'] = 'Markov Chain Monte Carlo (MCMC) sampler' super(mcmc, self).__init__(*args, **kwargs) def check_par_validity(self, par): @@ -144,7 +144,7 @@ def sample(self, repetitions,nChains=1): #Refresh MCMC progressbar every two second if acttime - intervaltime >= 2 and self.iter >=2: text = '%i of %i (best like=%g)' % ( - self.iter + self.burnIn, repetitions, self.status.objectivefunction) + self.iter + self.burnIn, repetitions, self.status.objectivefunction_max) text = "Acceptance rates [%] =" +str(np.around((self.accepted)/float(((self.iter-self.burnIn)/self.nChains)),decimals=4)*100).strip('array([])') print(text) intervaltime = time.time() diff --git a/spotpy/algorithms/mle.py b/spotpy/algorithms/mle.py index 431b267a..2a6fec48 100644 --- a/spotpy/algorithms/mle.py +++ b/spotpy/algorithms/mle.py @@ -45,8 +45,10 @@ def __init__(self, *args, **kwargs): save_sim: boolean * True: Simulation results will be saved - * False: Simulationt results will not be saved + * False: Simulation results will not be 
saved ''' + kwargs['optimization_direction'] = 'maximize' + kwargs['algorithm_name'] = 'Maximum Likelihood Estimation (MLE) algorithm' super(mle, self).__init__(*args, **kwargs) diff --git a/spotpy/algorithms/rope.py b/spotpy/algorithms/rope.py index 7dc6404b..caf1ae2a 100644 --- a/spotpy/algorithms/rope.py +++ b/spotpy/algorithms/rope.py @@ -21,8 +21,7 @@ class rope(_algorithm): Hydrol. Earth Syst. Sci. Discuss., 5(3), 1641–1675, 2008. ''' - def __init__(self, spot_setup, dbname=None, dbformat=None, - parallel='seq', save_sim=True, save_threshold=-np.inf,sim_timeout = None): + def __init__(self, *args, **kwargs): ''' Input @@ -60,11 +59,11 @@ def __init__(self, spot_setup, dbname=None, dbformat=None, :param save_sim: boolean *True: Simulation results will be saved - *False: Simulationt results will not be saved + *False: Simulation results will not be saved ''' - _algorithm.__init__(self, spot_setup, dbname=dbname, - dbformat=dbformat, parallel=parallel, - save_sim=save_sim, save_threshold=save_threshold,sim_timeout = sim_timeout) + kwargs['optimization_direction'] = 'maximize' + kwargs['algorithm_name'] = 'RObust Parameter Estimation (ROPE) algorithm' + super(rope, self).__init__(*args, **kwargs) def get_best_runs(self, likes, pars, runs, percentage): ''' @@ -156,7 +155,7 @@ def sample(self, repetitions=None, repetitions_first_run=None, break if acttime - intervaltime >= 2: text = '1 Subset: Run %i of %i (best like=%g)' % ( - rep, first_run, self.status.objectivefunction) + rep, first_run, self.status.objectivefunction_max) print(text) intervaltime = time.time() @@ -199,7 +198,7 @@ def sample(self, repetitions=None, repetitions_first_run=None, subset + 2, rep, repetitions_following_runs, - self.status.objectivefunction) + self.status.objectivefunction_max) print(text) intervaltime = time.time() if self.status.stop: diff --git a/spotpy/algorithms/sa.py b/spotpy/algorithms/sa.py index c85dc6ff..15bc3f31 100644 --- a/spotpy/algorithms/sa.py +++ b/spotpy/algorithms/sa.py @@ -53,7 +53,8 @@ def __init__(self, *args, **kwargs): * True: Simulation results will be saved * False: Simulation results will not be saved """ - + kwargs['optimization_direction'] = 'maximize' + kwargs['algorithm_name'] = 'Simulated Annealing (SA) algorithm' super(sa, self).__init__(*args, **kwargs) def check_par_validity(self, par): diff --git a/spotpy/algorithms/sceua.py b/spotpy/algorithms/sceua.py index b3160c85..32645275 100644 --- a/spotpy/algorithms/sceua.py +++ b/spotpy/algorithms/sceua.py @@ -60,10 +60,10 @@ def __init__(self, *args, **kwargs): * True: Simulation results will be saved * False: Simulation results will not be saved """ - - if 'alt_objfun' not in kwargs: - kwargs['alt_objfun'] = 'rmse' + kwargs['optimization_direction'] = 'minimize' + kwargs['algorithm_name'] = 'Shuffled Complex Evolution (SCE-UA) algorithm' super(sceua, self).__init__(*args, **kwargs) + def simulate(self, id_params_tuple): """This overwrites the simple wrapper function of _algorithms.py @@ -151,7 +151,6 @@ def sample(self, repetitions, ngs=20, kstop=100, pcento=0.0000001, peps=0.000000 Value of the normalized geometric range of the parameters in the population below which convergence is deemed achieved. 
""" self.set_repetiton(repetitions) - print('Starting the SCE-UA algorithm with '+str(repetitions)+ ' repetitions...') # Initialize SCE parameters: self.ngs = ngs randompar = self.parameter()['random'] @@ -161,6 +160,7 @@ def sample(self, repetitions, ngs=20, kstop=100, pcento=0.0000001, peps=0.000000 self.nspl = self.npg npt = self.npg * self.ngs self.iseed = 1 + self.discarded_runs = 0 self.bl, self.bu = self.parameter()['minbound'], self.parameter()[ 'maxbound'] bound = self.bu - self.bl # np.array @@ -180,7 +180,7 @@ def sample(self, repetitions, ngs=20, kstop=100, pcento=0.0000001, peps=0.000000 icall = 0 xf = np.zeros(npt) - print ('burn-in sampling started...') + print ('Starting burn-in sampling...') # Burn in param_generator = ((rep, x[rep]) for rep in range(int(npt))) @@ -224,7 +224,7 @@ def sample(self, repetitions, ngs=20, kstop=100, pcento=0.0000001, peps=0.000000 print( 'THE POPULATION HAS CONVERGED TO A PRESPECIFIED SMALL PARAMETER SPACE') - print ('burn-in sampling completed...') + print ('Burn-in sampling completed...') # Begin evolution loops: nloop = 0 @@ -232,7 +232,7 @@ def sample(self, repetitions, ngs=20, kstop=100, pcento=0.0000001, peps=0.000000 criter_change_pcent = 1e+5 self.repeat.setphase('ComplexEvo') - print ('ComplexEvo started...') + print ('Starting Complex Evolution...') proceed = True while icall < repetitions and gnrng > peps and criter_change_pcent > pcento and proceed == True: nloop += 1 @@ -259,6 +259,7 @@ def sample(self, repetitions, ngs=20, kstop=100, pcento=0.0000001, peps=0.000000 #Collect data from all slaves but do not save proceed=False like = self.postprocessing(i, pars[i], sims[i], chains=i+1, save_run=False) + self.discarded_runs+=1 print('Skipping saving') if self.breakpoint == 'write' or self.breakpoint == 'readandwrite'\ @@ -300,19 +301,18 @@ def sample(self, repetitions, ngs=20, kstop=100, pcento=0.0000001, peps=0.000000 elif nloop >= kstop: # necessary so that the area of high posterior density is visited as much as possible - print ('objective function convergence criteria is now being updated and assessed...') + print ('Objective function convergence criteria is now being updated and assessed...') absolute_change = np.abs( - criter[nloop - 1] - criter[nloop - kstop]) + criter[nloop - 1] - criter[nloop - kstop])*100 denominator = np.mean(np.abs(criter[(nloop - kstop):nloop])) if denominator == 0.0: criter_change_pcent = 0.0 else: - criter_change_pcent = absolute_change / denominator * 100 - print ('updated convergence criteria: %f' % criter_change_pcent) + criter_change_pcent = absolute_change / denominator + print ('Updated convergence criteria: %f' % criter_change_pcent) if criter_change_pcent <= pcento: - text = 'THE BEST POINT HAS IMPROVED IN LAST %d LOOPS BY LESS THAN THE USER-SPECIFIED THRESHOLD %f' % ( - kstop, pcento) - print(text) + print('THE BEST POINT HAS IMPROVED IN LAST %d LOOPS BY LESS THAN THE USER-SPECIFIED THRESHOLD %f' % ( + kstop, pcento)) print( 'CONVERGENCY HAS ACHIEVED BASED ON OBJECTIVE FUNCTION CRITERIA!!!') elif self.status.stop: @@ -320,16 +320,14 @@ def sample(self, repetitions, ngs=20, kstop=100, pcento=0.0000001, peps=0.000000 break # End of the Outer Loops - text = 'SEARCH WAS STOPPED AT TRIAL NUMBER: %d' % self.status.rep - print(text) - text = 'NORMALIZED GEOMETRIC RANGE = %f' % gnrng - print(text) - text = 'THE BEST POINT HAS IMPROVED IN LAST %d LOOPS BY %f PERCENT' % ( - kstop, criter_change_pcent) - print(text) + print('SEARCH WAS STOPPED AT TRIAL NUMBER: %d' % self.status.rep) + print('NUMBER OF 
DISCARDED TRIALS: %d' % self.discarded_runs) + print('NORMALIZED GEOMETRIC RANGE = %f' % gnrng) + print('THE BEST POINT HAS IMPROVED IN LAST %d LOOPS BY %f PERCENT' % ( + kstop, criter_change_pcent)) # reshape BESTX - BESTX = BESTX.reshape(BESTX.size // self.nopt, self.nopt) + #BESTX = BESTX.reshape(BESTX.size // self.nopt, self.nopt) self.final_call() @@ -382,6 +380,8 @@ ## fnew = functn(self.nopt,snew); _, _, simulations = _algorithm.simulate(self, (1, snew)) like = self.postprocessing(1, snew, simulations, save_run=False, block_print=True) + self.discarded_runs+=1 + fnew = like if self.status.stop: print('Stopping complex evolution') @@ -394,6 +394,7 @@ _, _, simulations = _algorithm.simulate(self, (2, snew)) like = self.postprocessing(2, snew, simulations, save_run=False, block_print=True) + self.discarded_runs+=1 fnew = like if self.status.stop: print('Stopping complex evolution') @@ -405,7 +406,8 @@ snew = self._sampleinputmatrix(1, self.nopt)[0] _, _, simulations = _algorithm.simulate(self, (3, snew)) like = self.postprocessing(3, snew, simulations, save_run=False, block_print=True) - fnew = like + self.discarded_runs+=1 + fnew = like # END OF CCE return snew, fnew, simulations diff --git a/spotpy/analyser.py b/spotpy/analyser.py index ab0c2713..bc382cf9 100644 --- a/spotpy/analyser.py +++ b/spotpy/analyser.py @@ -279,7 +279,7 @@ index = np.where(results['like1']>=np.percentile(results['like1'],100.0-percentage)) return results[index] -def plot_parameter_uncertainty(posterior_results,evaluation): +def plot_parameter_uncertainty(posterior_results,evaluation, fig_name='Posterior_parameter_uncertainty.png'): import pylab as plt simulation_fields = get_simulation_fields(posterior_results) @@ -303,8 +303,8 @@ plt.xlabel('Number of Observation Points') plt.ylabel ('Simulated value') plt.legend(loc='upper right') - fig.savefig('Posteriot_parameter_uncertainty.png',dpi=300) - text='A plot of the parameter uncertainty has been saved as "Posteriot_parameter_uncertainty.png"' + fig.savefig(fig_name,dpi=300) + text='A plot of the parameter uncertainty has been saved as '+fig_name print(text) @@ -354,9 +354,10 @@ :return: Possible minimal and maximal values of all parameters in the parameters function of the spotpy_setup class :rtype: Two arrays """ - randompar = spotpy_setup.parameters()['random'] + params = spotpy.parameter.get_parameters_from_setup(spotpy_setup) + randompar = spotpy.parameter.generate(params)['random'] for i in range(1000): - randompar = np.column_stack((randompar, spotpy_setup.parameters()['random'])) + randompar = np.column_stack((randompar, spotpy.parameter.generate(params)['random'])) return np.amin(randompar, axis=1), np.amax(randompar, axis=1) def get_parbounds(spotpy_setup): @@ -429,7 +430,7 @@ (parnames[i], Si['S1'][i], Si['ST'][i])) return Si -def plot_fast_sensitivity(results,like='like1',number_of_sensitiv_pars=10): +def plot_fast_sensitivity(results,like='like1',number_of_sensitiv_pars=10,fig_name='FAST_sensitivity.png'): """ Example, how to plot the sensitivity for every parameter of your result array, created with the FAST algorithm @@ -504,14 +505,12 @@ ax.set_xticklabels(['0']+parnames) 
ax.plot(np.arange(-1,len(parnames)+1,1),[threshold]*(len(parnames)+2),'r--') ax.set_xlim(-0.5,len(parnames)-0.5) - fig.savefig('FAST_sensitivity.png',dpi=300) + fig.savefig(fig_name,dpi=300) -def plot_heatmap_griewank(results,algorithms): +def plot_heatmap_griewank(results,algorithms, fig_name='heatmap_griewank.png'): """Example Plot as seen in the SPOTPY Documentation""" import matplotlib.pyplot as plt - from matplotlib import colors - cnames=list(colors.cnames) from matplotlib import ticker from matplotlib import cm @@ -555,15 +554,13 @@ def plot_heatmap_griewank(results,algorithms): ax.set_title(algorithms[i]) - fig.savefig('test.png', bbox_inches='tight') # <------ this + fig.savefig(fig_name, bbox_inches='tight') -def plot_objectivefunction(results,evaluation,limit=None,sort=True): +def plot_objectivefunction(results,evaluation,limit=None,sort=True, fig_name = 'objective_function.png'): """Example Plot as seen in the SPOTPY Documentation""" import matplotlib.pyplot as plt - from matplotlib import colors likes=calc_like(results,evaluation,spotpy.objectivefunctions.rmse) - cnames=list(colors.cnames) data=likes #Calc confidence Interval mean = np.average(data) @@ -597,54 +594,43 @@ def plot_objectivefunction(results,evaluation,limit=None,sort=True): #plt.ylim(ymin=-1,ymax=1.39) else: plt.plot(bestlike) + plt.savefig(fig_name) -def plot_parametertrace_algorithms(results,algorithmnames=None,parameternames=None): +def plot_parametertrace_algorithms(result_lists, algorithmnames, spot_setup, + fig_name='parametertrace_algorithms.png'): """Example Plot as seen in the SPOTPY Documentation""" import matplotlib.pyplot as plt - from matplotlib import colors font = {'family' : 'calibri', 'weight' : 'normal', 'size' : 20} plt.rc('font', **font) fig=plt.figure(figsize=(17,5)) - subplots=len(results) - rows=2 + subplots=len(result_lists) + parameter = spotpy.parameter.get_parameters_array(spot_setup) + rows=len(parameter['name']) for j in range(rows): for i in range(subplots): ax = plt.subplot(rows,subplots,i+1+j*subplots) - if j==0: - if parameternames: - data=results[i]['par'+parameternames[0]] - else: - data=results[i]['par0'] - if j==1: - if parameternames: - data=results[i]['par'+parameternames[1]] - else: - data=results[i]['par1'] - ax.set_xlabel(algorithmnames[i-subplots]) - + data=result_lists[i]['par'+parameter['name'][j]] ax.plot(data,'b-') - ax.set_ylim(-50,50) - if i==0 and j==0: - ax.set_ylabel('x') - ax.yaxis.set_ticks([-50,0,50]) + if i==0: + ax.set_ylabel(parameter['name'][j]) rep = len(data) - if i==0 and j==1: - ax.set_ylabel('y') - ax.yaxis.set_ticks([-50,0,50]) - if j==0: - ax.xaxis.set_ticks([]) if i>0: ax.yaxis.set_ticks([]) + if j==rows-1: + ax.set_xlabel(algorithmnames[i-subplots]) + else: + ax.xaxis.set_ticks([]) ax.plot([1]*rep,'r--') ax.set_xlim(0,rep) + ax.set_ylim(parameter['minbound'][j],parameter['maxbound'][j]) #plt.tight_layout() - fig.savefig('test2.png', bbox_inches='tight') + fig.savefig(fig_name, bbox_inches='tight') -def plot_parametertrace(results,parameternames=None): +def plot_parametertrace(results,parameternames=None,fig_name='Parameter_trace.png'): """ Get a plot with all values of a given parameter in your result array. The plot will be saved as a .png file. 
@@ -659,8 +645,6 @@ :rtype: figure """ import matplotlib.pyplot as plt - from matplotlib import colors - cnames=list(colors.cnames) fig=plt.figure(figsize=(16,9)) if not parameternames: parameternames=get_parameternames(results) @@ -677,11 +661,11 @@ ax.set_title('Parametertrace') ax.legend() i+=1 - fig.savefig(names+'_trace.png') - text='The figure as been saved as "'+names+'trace.png"' + fig.savefig(fig_name) + text='The figure has been saved as '+fig_name print(text) -def plot_posterior_parametertrace(results,parameternames=None,threshold=0.1): +def plot_posterior_parametertrace(results,parameternames=None,threshold=0.1, fig_name='Posterior_parametertrace.png'): """ Get a plot with all values of a given parameter in your result array. The plot will be saved as a .png file. @@ -696,8 +680,6 @@ :rtype: figure """ import matplotlib.pyplot as plt - from matplotlib import colors - cnames=list(colors.cnames) fig=plt.figure(figsize=(16,9)) results=sort_like(results) @@ -716,11 +698,11 @@ ax.set_title('Parametertrace') ax.legend() i+=1 - fig.savefig(names+'_trace.png') - text='The figure as been saved as "'+names+'trace.png"' + fig.savefig(fig_name) + text='The figure has been saved as '+fig_name print(text) -def plot_posterior(results,evaluation,dates=None,ylabel='Posterior model simulation',xlabel='Time',objectivefunction='NSE',objectivefunctionmax=True,calculatelike=True,sort=True, bestperc=0.1): +def plot_posterior(results,evaluation,dates=None,ylabel='Posterior model simulation',xlabel='Time',bestperc=0.1, fig_name='bestmodelrun.png'): """ Get a plot with the maximum objectivefunction of your simulations in your result array. @@ -747,37 +729,12 @@ Returns: figure. Plot of the simulation with the maximum objectivefunction value in the result array as a blue line and dots for the evaluation data. - A really great idea. 
A way you might use me is - >>> bcf.analyser.plot_bestmodelrun(results,evaluation, ylabel='Best model simulation') - """ import matplotlib.pyplot as plt - from matplotlib import colors - import random - - cnames=list(colors.cnames) - - - plt.rc('font', **font) - if sort: - results=sort_like(results) - if calculatelike: - likes=calc_like(results, evaluation,spotpy.objectivefunctions.rmse) - maximum=max(likes) - par=get_parameters(results) - sim=get_modelruns(results) - index=likes.index(maximum) - bestmodelrun=list(sim[index]) - bestparameterset=list(par[index]) - - else: - if objectivefunctionmax==True: - index,maximum=get_maxlikeindex(results) - else: - index,maximum=get_minlikeindex(results) - sim=get_modelruns(results) - bestmodelrun=list(sim[index][0])#Transform values into list to ensure plotting - bestparameterset=list(get_parameters(results)[index][0]) + index,maximum=get_maxlikeindex(results) + sim=get_modelruns(results) + bestmodelrun=list(sim[index][0])#Transform values into list to ensure plotting + bestparameterset=list(get_parameters(results)[index][0]) parameternames=list(get_parameternames(results) ) bestparameterstring='' @@ -787,35 +744,18 @@ bestparameterstring+='\n' bestparameterstring+=parameternames[i]+'='+str(round(bestparameterset[i],4))+',' fig=plt.figure(figsize=(16,8)) - if dates is not None: - chains=int(max(results['chain'])) - colors=list(cnames) - random.shuffle(colors) - - for s in sim[5000:]: - plt.plot(dates,list(s),'c-',alpha=0.05) - plt.plot(dates,bestmodelrun,'b-',label='Simulations: '+objectivefunction+'='+str(round(maxNSE,4))) - plt.plot(dates,evaluation,'ro',label='Evaluation') - else: - pl_i = 1 - for s in sim: - # why we plotting dates again is we in case that it is none, this will cause an error?? - #plt.plot(dates, list(s), 'c-', alpha=0.05) - plt.plot(pl_i, list(s), 'c-', alpha=0.05) - pl_i+=1 - - plt.plot(bestmodelrun,'b-',label='Simulations: '+objectivefunction+'='+str(round(maxNSE,4))) - plt.plot(evaluation,'ro',label='Evaluation') + plt.plot(bestmodelrun,'b-',label='Simulation='+str(round(maximum,4))) + plt.plot(evaluation,'ro',label='Evaluation') plt.legend() plt.ylabel(ylabel) plt.xlabel(xlabel) plt.title('Maximum objectivefunction of Simulations with '+bestparameterstring[0:-2]) - fig.savefig('bestmodelrun.png') - text='The figure as been saved as "bestmodelrun.png"' + fig.savefig(fig_name) + text='The figure has been saved as '+fig_name print(text) -def plot_bestmodelrun(results,evaluation): +def plot_bestmodelrun(results,evaluation,fig_name ='Best_model_run.png'): """ Get a plot with the maximum objectivefunction of your simulations in your result array. 
@@ -843,13 +783,13 @@ plt.xlabel('Number of Observation Points') plt.ylabel ('Simulated value') plt.legend(loc='upper right') - text='A plot of the best model run has been saved as "Best_model_run.png"' + fig.savefig(fig_name,dpi=300) + text='A plot of the best model run has been saved as '+fig_name print(text) - fig.savefig('Best_model_run.png',dpi=300) - + -def plot_bestmodelruns(results,evaluation,algorithms=None,dates=None,ylabel='Best model simulation',xlabel='Date',objectivefunctionmax=True,calculatelike=True): +def plot_bestmodelruns(results,evaluation,algorithms=None,dates=None,ylabel='Best model simulation',xlabel='Date',objectivefunctionmax=True,calculatelike=True,fig_name='bestmodelrun.png'): """ Get a plot with the maximum objectivefunction of your simulations in your result array. @@ -881,9 +821,6 @@ """ import matplotlib.pyplot as plt - from matplotlib import colors - cnames=list(colors.cnames) - plt.rc('font', **font) fig=plt.figure(figsize=(17,8)) @@ -923,11 +860,11 @@ plt.xlabel(xlabel) plt.ylim(15,50) #DELETE WHEN NOT USED WITH SOIL MOISTUR RESULTS - fig.savefig('bestmodelrun.png') - text='The figure as been saved as "bestmodelrun.png"' + fig.savefig(fig_name) + text='The figure has been saved as '+fig_name print(text) -def plot_objectivefunctiontraces(results,evaluation,algorithms,filename='Like_trace'): +def plot_objectivefunctiontraces(results,evaluation,algorithms,fig_name='Like_trace.png'): import matplotlib.pyplot as plt from matplotlib import colors cnames=list(colors.cnames) @@ -953,13 +890,11 @@ ax.yaxis.set_ticks([]) plt.tight_layout() - fig.savefig(str(filename)+'.png') + fig.savefig(fig_name) -def plot_regression(results,evaluation): +def plot_regression(results,evaluation,fig_name='regressionanalysis.png'): import matplotlib.pyplot as plt - from matplotlib import colors - cnames=list(colors.cnames) fig=plt.figure(figsize=(16,9)) simulations=get_modelruns(results) for sim in simulations: @@ -967,33 +902,29 @@ plt.ylabel('simulation') plt.xlabel('evaluation') plt.title('Regression between simulations and evaluation data') - fig.savefig('regressionanalysis.png') - text='The figure as been saved as "regressionanalysis.png"' + fig.savefig(fig_name) + text='The figure has been saved as '+fig_name print(text) -def plot_parameterInteraction(results): +def plot_parameterInteraction(results, fig_name ='ParameterInteraction.png'): '''Input: List with values of parameters and list of strings with parameter names Output: Dotty plot of parameter distribution and gaussian kde distribution''' import matplotlib.pyplot as plt - from matplotlib import colors import pandas as pd - cnames=list(colors.cnames) parameterdistribtion=get_parameters(results) parameternames=get_parameternames(results) df = pd.DataFrame(np.asarray(parameterdistribtion).T.tolist(), columns=parameternames) pd.plotting.scatter_matrix(df, alpha=0.2, figsize=(12, 12), diagonal='kde') - plt.savefig('ParameterInteraction',dpi=300) + plt.savefig(fig_name,dpi=300) -def plot_allmodelruns(modelruns,observations,dates=None): +def plot_allmodelruns(modelruns,observations,dates=None, fig_name='bestmodel.png'): '''Input: Array of modelruns and list of Observations Output: Plot with all modelruns as a 
line and dots with the Observations ''' import matplotlib.pyplot as plt - from matplotlib import colors - cnames=list(colors.cnames) fig=plt.figure(figsize=(16,9)) ax = plt.subplot(1,1,1) if dates is not None: @@ -1014,36 +945,30 @@ ax.set_xlabel = 'Best model simulation' ax.set_ylabel = 'Evaluation points' ax.set_title = 'Maximum objectivefunction of Simulations' - fig.savefig('bestmodel.png') - text='The figure as been saved as "Modelruns.png"' + fig.savefig(fig_name) + text='The figure has been saved as '+fig_name print(text) -def plot_autocorellation(parameterdistribution,parametername): +def plot_autocorellation(parameterdistribution,parametername,fig_name='Autocorrelation.png'): '''Input: List of sampled values for one Parameter Output: Parameter Trace, Histogramm and Autocorrelation Plot''' import matplotlib.pyplot as plt - from matplotlib import colors import pandas as pd - cnames=list(colors.cnames) - fig=plt.figure(figsize=(16,9)) - ax = plt.subplot(1,1,1) pd.plotting.autocorrelation_plot(parameterdistribution) - plt.savefig('Autocorellation'+str(parametername),dpi=300) + plt.savefig(fig_name,dpi=300) -def plot_gelman_rubin(r_hat_values): +def plot_gelman_rubin(r_hat_values,fig_name='gelman_rub.png'): '''Input: List of R_hat values of chains (see Gelman & Rubin 1992) Output: Plot as seen for e.g. in (Sadegh and Vrugt 2014)''' import matplotlib.pyplot as plt - from matplotlib import colors - cnames=list(colors.cnames) fig=plt.figure(figsize=(16,9)) ax = plt.subplot(1,1,1) ax.plot(r_hat_values) ax.plot([1.2]*len(r_hat_values),'k--') ax.set_xlabel='r_hat' - + plt.savefig(fig_name,dpi=300) def gelman_rubin(x): '''NOT USED YET''' @@ -1074,8 +999,7 @@ def plot_Geweke(parameterdistribution,parametername): '''Input: Takes a list of sampled values for a parameter and his name as a string Output: Plot as seen for e.g. 
in BUGS or PyMC''' import matplotlib.pyplot as plt - from matplotlib import colors - cnames=list(colors.cnames) + # perform the Geweke test Geweke_values = _Geweke(parameterdistribution) diff --git a/spotpy/examples/gui_cmf_lumped.py b/spotpy/examples/gui_cmf_lumped.py index 38ad20f6..a1d8ffd7 100644 --- a/spotpy/examples/gui_cmf_lumped.py +++ b/spotpy/examples/gui_cmf_lumped.py @@ -7,15 +7,23 @@ from __future__ import division, print_function, unicode_literals -import datetime import spotpy from spotpy.gui.mpl import GUI -from spotpy.examples.spot_setup_cmf_lumped import SingleStorage +from spotpy.examples.spot_setup_hymod_python import spot_setup if __name__ == '__main__': - # Create the model - model = SingleStorage(datetime.datetime(1980, 1, 1), - datetime.datetime(1985, 12, 31)) - spotpy.describe.setup(model) - gui = GUI(model) + setup_class=spot_setup(_used_algorithm='sceua') + + #Select number of maximum allowed repetitions + rep=10000 + + # Create the SCE-UA sampler of spotpy; SPOTPY now always jumps into the + # def objectivefunction of the spot_setup class, since the alt_objfun + # keyword has been removed + sampler=spotpy.algorithms.sceua(setup_class, dbname='SCEUA_hymod', dbformat='csv') + + #Start the sampler, one can specify ngs, kstop, peps and pcento if desired + #sampler.sample(rep,ngs=10, kstop=50, peps=0.1, pcento=0.1) + + gui = GUI(spot_setup()) gui.show() diff --git a/spotpy/examples/spot_setup_hymod_python.py b/spotpy/examples/spot_setup_hymod_python.py index c3c403d0..67db19d6 100644 --- a/spotpy/examples/spot_setup_hymod_python.py +++ b/spotpy/examples/spot_setup_hymod_python.py @@ -20,7 +20,7 @@ class spot_setup(object): cmax = spotpy.parameter.Uniform(low=1.0 , high=500, optguess=412.33) bexp = spotpy.parameter.Uniform(low=0.1 , high=2.0, optguess=0.1725) alpha = spotpy.parameter.Uniform(low=0.1 , high=0.99, optguess=0.8127) - Ks = spotpy.parameter.Uniform(low=0.0 , high=0.10, optguess=0.0404) + Ks = spotpy.parameter.Uniform(low=0.001 , high=0.10, optguess=0.0404) Kq = spotpy.parameter.Uniform(low=0.1 , high=0.99, optguess=0.5592) #fake1 =spotpy.parameter.Uniform(low=0.1 , high=10, optguess=0.5592) #fake2 =spotpy.parameter.Uniform(low=0.1 , high=10, optguess=0.5592) @@ -65,6 +65,10 @@ def evaluation(self): def objectivefunction(self,simulation,evaluation, params=None): if self._used_algorithm == 'sceua': like = spotpy.objectivefunctions.rmse(evaluation,simulation) + elif self._used_algorithm == 'dream': + like = spotpy.objectivefunctions.log_p(evaluation,simulation) + elif self._used_algorithm == 'default': + like = spotpy.objectivefunctions.nashsutcliffe(evaluation,simulation) else: like = spotpy.likelihoods.gaussianLikelihoodMeasErrorOut(evaluation,simulation) return like \ No newline at end of file diff --git a/spotpy/examples/spot_setup_rosenbrock.py b/spotpy/examples/spot_setup_rosenbrock.py index c09f9117..edc17969 100644 --- a/spotpy/examples/spot_setup_rosenbrock.py +++ b/spotpy/examples/spot_setup_rosenbrock.py @@ -12,7 +12,7 @@ import numpy as np from spotpy.parameter import Uniform -from spotpy.objectivefunctions import rmse +from spotpy.objectivefunctions import rmse, log_p class spot_setup(object): """ @@ -23,7 +23,8 @@ class spot_setup(object): x = Uniform(-10, 10, 1.5, 3.0, -10, 10, doc='x value of Rosenbrock function') y = Uniform(-10, 10, 1.5, 3.0, -10, 10, doc='y value of Rosenbrock function') z = Uniform(-10, 10, 1.5, 3.0, -10, 10, doc='z value of Rosenbrock function') - + def 
diff --git a/spotpy/examples/spot_setup_rosenbrock.py b/spotpy/examples/spot_setup_rosenbrock.py
index c09f9117..edc17969 100644
--- a/spotpy/examples/spot_setup_rosenbrock.py
+++ b/spotpy/examples/spot_setup_rosenbrock.py
@@ -12,7 +12,7 @@
 import numpy as np
 from spotpy.parameter import Uniform
-from spotpy.objectivefunctions import rmse
+from spotpy.objectivefunctions import rmse, log_p

 class spot_setup(object):
     """
@@ -23,7 +23,8 @@ class spot_setup(object):
     x = Uniform(-10, 10, 1.5, 3.0, -10, 10, doc='x value of Rosenbrock function')
     y = Uniform(-10, 10, 1.5, 3.0, -10, 10, doc='y value of Rosenbrock function')
     z = Uniform(-10, 10, 1.5, 3.0, -10, 10, doc='z value of Rosenbrock function')
-
+    def __init__(self, used_algorithm='default'):
+        self.used_algorithm = used_algorithm

     def simulation(self, vector):
         x=np.array(vector)
         simulations= [sum(100.0 * (x[1:] - x[:-1] ** 2.0) ** 2.0 + (1 - x[:-1]) ** 2.0)]
@@ -34,5 +35,10 @@ def evaluation(self):
         return observations

     def objectivefunction(self, simulation, evaluation):
-        objectivefunction = rmse(evaluation=evaluation, simulation=simulation)
+        if self.used_algorithm == 'sceua' or self.used_algorithm == 'abc' or self.used_algorithm == 'fscabc':
+            objectivefunction = rmse(evaluation=evaluation, simulation=simulation)
+        elif self.used_algorithm == 'dream' or self.used_algorithm == 'demcz' or self.used_algorithm == 'mcmc':
+            objectivefunction = log_p(evaluation=evaluation, simulation=simulation)
+        else:
+            objectivefunction = -rmse(evaluation=evaluation, simulation=simulation)
         return objectivefunction
\ No newline at end of file
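The branching above encodes a sign convention: minimizers (SCE-UA, ABC, FSCABC) receive a plain RMSE, the MCMC family receives `log_p`, and every other algorithm receives a negated RMSE so that "larger is better" still holds. A quick sketch; the `'mc'` flag is only one example of an algorithm that falls into the else-branch:

```python
from spotpy.examples.spot_setup_rosenbrock import spot_setup

setup = spot_setup(used_algorithm='mc')   # not listed above -> else-branch, i.e. -rmse
sim = setup.simulation([1.0, 1.0, 1.0])   # [1, 1, 1] is the Rosenbrock optimum
obs = setup.evaluation()
# -rmse branch: values closer to zero are better for a maximizing sampler
obj = setup.objectivefunction(simulation=sim, evaluation=obs)
```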
diff --git a/spotpy/examples/tutorial_dream_hymod.py b/spotpy/examples/tutorial_dream_hymod.py
index f87cdd41..9513e645 100644
--- a/spotpy/examples/tutorial_dream_hymod.py
+++ b/spotpy/examples/tutorial_dream_hymod.py
@@ -19,7 +19,7 @@
     parallel ='seq'
     # Initialize the Hymod example (will only work on Windows systems)
     #spot_setup=spot_setup(parallel=parallel)
-    spot_setup=spot_setup()
+    spot_setup=spot_setup(_used_algorithm='dream')

     # Create the Dream sampler of spotpy, alt_objfun is set to None to force SPOTPY
     # to jump into the def objectivefunction in the spot_setup class (default is
     # spotpy.objectivefunctions.rmse)
@@ -33,7 +33,7 @@
     convergence_limit = 1.2
     runs_after_convergence = 100

-    sampler=spotpy.algorithms.dream(spot_setup, dbname='DREAM_hymod', dbformat='csv', alt_objfun=None)
+    sampler=spotpy.algorithms.dream(spot_setup, dbname='DREAM_hymod', dbformat='csv')
     r_hat = sampler.sample(rep,nChains=nChains,convergence_limit=convergence_limit,
                            runs_after_convergence=runs_after_convergence)
@@ -93,19 +93,14 @@
     # Example plot to show the parameter distribution ######
+    parameters = spotpy.parameter.get_parameters_array(spot_setup)

-    def find_min_max(spot_setup):
-        randompar=spot_setup.parameters()['random']
-        for i in range(1000):
-            randompar=np.column_stack((randompar,spot_setup.parameters()['random']))
-        return np.amin(randompar,axis=1),np.amax(randompar,axis=1)
-
-    min_vs,max_vs = find_min_max(spot_setup)
+    min_vs,max_vs = parameters['minbound'], parameters['maxbound']

     fig= plt.figure(figsize=(16,16))
     plt.subplot(5,2,1)
-    x = results['par'+str(spot_setup.parameters()['name'][0])]
+    x = results['par'+str(parameters['name'][0])]
     for i in range(int(max(results['chain']))):
         index=np.where(results['chain']==i)
         plt.plot(x[index],'.')
@@ -114,7 +109,7 @@ def find_min_max(spot_setup):

     plt.subplot(5,2,2)
-    x = results['par'+spot_setup.parameters()['name'][0]][int(len(results)*0.5):]
+    x = results['par'+parameters['name'][0]][int(len(results)*0.5):]
     normed_value = 1
     hist, bins = np.histogram(x, bins=20, density=True)
     widths = np.diff(bins)
@@ -126,7 +121,7 @@ def find_min_max(spot_setup):

     plt.subplot(5,2,3)
-    x = results['par'+spot_setup.parameters()['name'][1]]
+    x = results['par'+parameters['name'][1]]
     for i in range(int(max(results['chain']))):
         index=np.where(results['chain']==i)
         plt.plot(x[index],'.')
@@ -134,7 +129,7 @@
     plt.ylim(min_vs[1],max_vs[1])

     plt.subplot(5,2,4)
-    x = results['par'+spot_setup.parameters()['name'][1]][int(len(results)*0.5):]
+    x = results['par'+parameters['name'][1]][int(len(results)*0.5):]
     normed_value = 1
     hist, bins = np.histogram(x, bins=20, density=True)
     widths = np.diff(bins)
@@ -146,7 +141,7 @@ def find_min_max(spot_setup):

     plt.subplot(5,2,5)
-    x = results['par'+spot_setup.parameters()['name'][2]]
+    x = results['par'+parameters['name'][2]]
     for i in range(int(max(results['chain']))):
         index=np.where(results['chain']==i)
         plt.plot(x[index],'.')
@@ -155,7 +150,7 @@ def find_min_max(spot_setup):

     plt.subplot(5,2,6)
-    x = results['par'+spot_setup.parameters()['name'][2]][int(len(results)*0.5):]
+    x = results['par'+parameters['name'][2]][int(len(results)*0.5):]
     normed_value = 1
     hist, bins = np.histogram(x, bins=20, density=True)
     widths = np.diff(bins)
@@ -166,7 +161,7 @@ def find_min_max(spot_setup):

     plt.subplot(5,2,7)
-    x = results['par'+spot_setup.parameters()['name'][3]]
+    x = results['par'+parameters['name'][3]]
     for i in range(int(max(results['chain']))):
         index=np.where(results['chain']==i)
         plt.plot(x[index],'.')
@@ -175,7 +170,7 @@ def find_min_max(spot_setup):

     plt.subplot(5,2,8)
-    x = results['par'+spot_setup.parameters()['name'][3]][int(len(results)*0.5):]
+    x = results['par'+parameters['name'][3]][int(len(results)*0.5):]
     normed_value = 1
     hist, bins = np.histogram(x, bins=20, density=True)
     widths = np.diff(bins)
@@ -186,7 +181,7 @@ def find_min_max(spot_setup):

     plt.subplot(5,2,9)
-    x = results['par'+spot_setup.parameters()['name'][4]]
+    x = results['par'+parameters['name'][4]]
     for i in range(int(max(results['chain']))):
         index=np.where(results['chain']==i)
         plt.plot(x[index],'.')
@@ -195,7 +190,7 @@ def find_min_max(spot_setup):
     plt.xlabel('Iterations')

     plt.subplot(5,2,10)
-    x = results['par'+spot_setup.parameters()['name'][4]][int(len(results)*0.5):]
+    x = results['par'+parameters['name'][4]][int(len(results)*0.5):]
     normed_value = 1
     hist, bins = np.histogram(x, bins=20, density=True)
     widths = np.diff(bins)
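The one-liner that replaces the old `find_min_max()` helper reads the parameter definitions of a setup class directly, so the bounds no longer have to be estimated from 1000 random draws. A small sketch, using the Hymod setup as an example instance:

```python
import spotpy
from spotpy.examples.spot_setup_hymod_python import spot_setup

parameters = spotpy.parameter.get_parameters_array(spot_setup())
print(parameters['name'])                               # parameter names, e.g. cmax, bexp, ...
print(parameters['minbound'], parameters['maxbound'])   # exact bounds from the Uniform definitions
```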
diff --git a/spotpy/examples/tutorial_rosenbrock.py b/spotpy/examples/tutorial_rosenbrock.py
index d699b2cd..182b51ef 100644
--- a/spotpy/examples/tutorial_rosenbrock.py
+++ b/spotpy/examples/tutorial_rosenbrock.py
@@ -107,5 +107,5 @@
 evaluation = spot_setup.evaluation()

 # Example how to plot the data
-#algorithms = ['mc','lhs','mle','mcmc','sceua','sa','demcz','rope','abc','fscabc', 'demcz', 'dream']
-#spotpy.analyser.plot_parametertrace_algorithms(results,algorithmnames=algorithms,parameternames=['x','y'])
+algorithms = ['mc','lhs','mle','mcmc','sceua','sa','demcz','rope','abc','fscabc', 'demcz', 'dream']
+spotpy.analyser.plot_parametertrace_algorithms(results,algorithmnames=algorithms,parameternames=['x','y'])
diff --git a/spotpy/examples/tutorial_sceua_hymod.py b/spotpy/examples/tutorial_sceua_hymod.py
index 5976fe90..efedb3bc 100644
--- a/spotpy/examples/tutorial_sceua_hymod.py
+++ b/spotpy/examples/tutorial_sceua_hymod.py
@@ -24,19 +24,22 @@
     spot_setup=spot_setup(_used_algorithm='sceua')

     #Select number of maximum allowed repetitions
-    rep=10000
-
+    rep=5000
+    filename = 'SCEUA_hymod'
     # Create the SCE-UA sampler of spotpy, alt_objfun is set to None to force SPOTPY
     # to jump into the def objectivefunction in the spot_setup class (default is
     # spotpy.objectivefunctions.rmse)
-    sampler=spotpy.algorithms.sceua(spot_setup, dbname='SCEUA_hymod', dbformat='csv', alt_objfun=None)
+    sampler=spotpy.algorithms.sceua(spot_setup, dbname='SCEUA_hymod', dbformat='csv')

     #Start the sampler, one can specify ngs, kstop, peps and pcento if desired
-    sampler.sample(rep,ngs=10, kstop=50, peps=0.1, pcento=0.1)
+    sampler.sample(rep)#,ngs=10, kstop=50, peps=0.1, pcento=0.1)

     # Load the results gained with the sceua sampler, stored in SCEUA_hymod.csv
     results = spotpy.analyser.load_csv_results('SCEUA_hymod')
+
+    print(len(results), 'runs were saved.')
+
     fig= plt.figure(1,figsize=(9,5))
     plt.plot(results['like1'])
     plt.show()
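The commented-out call above lists the tuning keywords that `sample()` accepts for SCE-UA. A hedged sketch of a fully specified call, using the tutorial's former values; the inline explanations paraphrase the usual SCE-UA terminology and are not taken from the diff itself:

```python
import spotpy
from spotpy.examples.spot_setup_hymod_python import spot_setup

sampler = spotpy.algorithms.sceua(spot_setup(_used_algorithm='sceua'),
                                  dbname='SCEUA_hymod', dbformat='csv')
sampler.sample(5000,        # maximum number of repetitions
               ngs=10,      # number of complexes in the shuffled population
               kstop=50,    # shuffling loops in which improvement is required
               peps=0.1,    # convergence criterion on the parameter space
               pcento=0.1)  # required percentage improvement within kstop loops
```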
diff --git a/spotpy/unittests/test_algorithms.py b/spotpy/unittests/test_algorithms.py
index de2ce22a..1026e087 100644
--- a/spotpy/unittests/test_algorithms.py
+++ b/spotpy/unittests/test_algorithms.py
@@ -26,7 +26,6 @@ def setUp(self):
         # How many digits to match in case of floating point answers
         self.tolerance = 7
         #Create samplers for every algorithm:
-        self.spot_setup = spot_setup()
         self.rep = 987
         self.timeout = 10  #Given in Seconds
@@ -34,85 +33,83 @@
         self.dbformat = "ram"

     def test_mc(self):
-        sampler=spotpy.algorithms.mc(self.spot_setup,parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
+        sampler=spotpy.algorithms.mc(spot_setup(),parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
         sampler.sample(self.rep)
         results = sampler.getdata()
         self.assertEqual(len(results), self.rep)

     def test_lhs(self):
-        sampler=spotpy.algorithms.lhs(self.spot_setup,parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
+        sampler=spotpy.algorithms.lhs(spot_setup(),parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
         sampler.sample(self.rep)
         results = sampler.getdata()
         self.assertEqual(len(results), self.rep)

     def test_mle(self):
-        sampler=spotpy.algorithms.mle(self.spot_setup,parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
+        sampler=spotpy.algorithms.mle(spot_setup(),parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
         sampler.sample(self.rep)
         results = sampler.getdata()
         self.assertEqual(len(results), self.rep)

     def test_mcmc(self):
-        sampler=spotpy.algorithms.mcmc(self.spot_setup,parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
+        sampler=spotpy.algorithms.mcmc(spot_setup(used_algorithm='mcmc'),parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
         sampler.sample(self.rep)
         results = sampler.getdata()
         self.assertEqual(len(results), self.rep)

     def test_demcz(self):
-        sampler=spotpy.algorithms.demcz(self.spot_setup,parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
+        sampler=spotpy.algorithms.demcz(spot_setup(used_algorithm='demcz'),parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
         sampler.sample(self.rep, convergenceCriteria=0)
         results = sampler.getdata()
         self.assertEqual(len(results), self.rep)

     def test_dream(self):
-        sampler=spotpy.algorithms.dream(self.spot_setup,parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
-        sampler.sample(self.rep)
+        sampler=spotpy.algorithms.dream(spot_setup(used_algorithm='dream'),parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
+        sampler.sample(self.rep, convergence_limit=0.9, runs_after_convergence=500)
         results = sampler.getdata()
         self.assertEqual(len(results), self.rep)

     def test_sceua(self):
-        sampler=spotpy.algorithms.sceua(self.spot_setup,parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
+        sampler=spotpy.algorithms.sceua(spot_setup(used_algorithm='sceua'),parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
         sampler.sample(self.rep)
         results = sampler.getdata()
         self.assertLessEqual(len(results), self.rep)  #SCE-UA by definition does not save all sampled runs

     def test_abc(self):
-        sampler=spotpy.algorithms.abc(self.spot_setup,parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
+        sampler=spotpy.algorithms.abc(spot_setup(used_algorithm='abc'),parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
         sampler.sample(self.rep)
         results = sampler.getdata()
         self.assertEqual(len(results), self.rep)

     def test_fscabc(self):
-        sampler=spotpy.algorithms.fscabc(self.spot_setup,parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
+        sampler=spotpy.algorithms.fscabc(spot_setup(used_algorithm='fscabc'),parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
         sampler.sample(self.rep)
         results = sampler.getdata()
         self.assertEqual(len(results), self.rep)

     def test_rope(self):
-        sampler=spotpy.algorithms.rope(self.spot_setup,parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
+        sampler=spotpy.algorithms.rope(spot_setup(),parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
         sampler.sample(self.rep)
         results = sampler.getdata()
         self.assertEqual(len(results), self.rep)

     def test_sa(self):
-        sampler=spotpy.algorithms.sa(self.spot_setup,parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
+        sampler=spotpy.algorithms.sa(spot_setup(),parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
         sampler.sample(self.rep)
         results = sampler.getdata()
         self.assertEqual(len(results), self.rep)

     def test_list(self):
         #generate a List sampler input
-        print(self.spot_setup.simulation)
-        sampler=spotpy.algorithms.mc(self.spot_setup,parallel=self.parallel, dbname='Rosen', dbformat='csv', sim_timeout=self.timeout)
+        sampler=spotpy.algorithms.mc(spot_setup(),parallel=self.parallel, dbname='Rosen', dbformat='csv', sim_timeout=self.timeout)
         sampler.sample(self.rep)
-        print(self.spot_setup.simulation)
-        sampler=spotpy.algorithms.list_sampler(self.spot_setup,parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
+        sampler=spotpy.algorithms.list_sampler(spot_setup(),parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
         sampler.sample(self.rep)
         results = sampler.getdata()
         self.assertEqual(len(results), self.rep)

     def test_fast(self):
-        sampler=spotpy.algorithms.fast(self.spot_setup,parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
+        sampler=spotpy.algorithms.fast(spot_setup(),parallel=self.parallel, dbname='Rosen', dbformat=self.dbformat, sim_timeout=self.timeout)
         sampler.sample(self.rep, M=5)
         results = sampler.getdata()
         self.assertEqual(len(results), self.rep)  #Si values should be returned
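Two patterns from the rewritten tests, condensed into one sketch: each test now builds a fresh `spot_setup()` so no state leaks between samplers, and SCE-UA is asserted with an upper bound only, since it does not store every sampled run:

```python
import spotpy
from spotpy.examples.spot_setup_rosenbrock import spot_setup

# Fresh setup instance per sampler; SCE-UA may save fewer runs than requested
sampler = spotpy.algorithms.sceua(spot_setup(used_algorithm='sceua'),
                                  dbname='Rosen', dbformat='ram', sim_timeout=10)
sampler.sample(987)
assert len(sampler.getdata()) <= 987
```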
diff --git a/spotpy/unittests/test_analyser.py b/spotpy/unittests/test_analyser.py
index 3b7ad8f0..600713e5 100644
--- a/spotpy/unittests/test_analyser.py
+++ b/spotpy/unittests/test_analyser.py
@@ -27,33 +27,54 @@
 import numpy as np
 import spotpy.analyser
 import os
-import pickle
 import sys
+from spotpy.examples.spot_setup_rosenbrock import spot_setup as rosenbrock_setup
+from spotpy.examples.spot_setup_griewank import spot_setup as griewank_setup
+from spotpy.examples.spot_setup_hymod_python import spot_setup as hymod_setup
+

 class TestAnalyser(unittest.TestCase):
-    def setUp(self):
+    @classmethod
+    def setUpClass(cls):
         np.random.seed(42)
-        self.rep = 100
-        if not os.path.isfile("setUp_pickle_file"):
-            from spotpy.examples.spot_setup_rosenbrock import spot_setup
-            spot_setup_object = spot_setup()
-            self.results = []
-            parallel = "seq"
-            dbformat = "ram"
-            timeout = 5
-
-            self.sampler = spotpy.algorithms.dream(spot_setup_object, parallel=parallel, dbname='RosenDREAM', dbformat=dbformat,
-                                                   sim_timeout=timeout)
-
-            self.sampler.sample(self.rep)
-            self.results.append(self.sampler.getdata())
-            self.results = np.array(self.results)
-
+        cls.rep = 300
+        cls.parallel = "seq"
+        cls.dbformat = "ram"
+        cls.timeout = 5
+        cls.fig_name = 'test_output.png'
+
+        sampler = spotpy.algorithms.mc(rosenbrock_setup(),
+                                       sim_timeout=cls.timeout)
+        sampler.sample(cls.rep)
+        cls.results = sampler.getdata()
+
+        sampler = spotpy.algorithms.mc(griewank_setup(),
+                                       sim_timeout=cls.timeout)
+        sampler.sample(cls.rep)
+        cls.griewank_results = sampler.getdata()
+
+        if sys.version_info >= (3, 6):  # FAST is only fully operational under Python 3
+            sampler = spotpy.algorithms.fast(rosenbrock_setup(),
+                                             sim_timeout=cls.timeout)
+            sampler.sample(cls.rep)
+            cls.sens_results = sampler.getdata()
+            #Hymod results are empty with Python <3.6
+            sampler = spotpy.algorithms.dream(hymod_setup(_used_algorithm='dream'),
+                                              sim_timeout=cls.timeout)
+            cls.r_hat = sampler.sample(cls.rep)
+            cls.hymod_results = sampler.getdata()
+
+    def test_setUp(self):
+        self.assertEqual(len(self.results), self.rep)
+        self.assertEqual(len(self.griewank_results), self.rep)
+        if sys.version_info >= (3, 6):
+            self.assertEqual(len(self.hymod_results), self.rep)
+            self.assertEqual(len(self.sens_results), self.rep)
+
     def test_get_parameters(self):
         self.assertEqual(len(spotpy.analyser.get_parameters(
-            self.results
-        )[0][0]), 3)
+            self.results)[0]), 3)

     def test_get_parameternames(self):
         self.assertEqual(spotpy.analyser.get_parameternames(
@@ -89,8 +110,8 @@ def test_get_like_fields(self):
     def test_calc_like(self):
         calc_like = spotpy.analyser.calc_like(
             self.results,
-            self.sampler.evaluation,spotpy.objectivefunctions.rmse)
-        self.assertEqual(len(calc_like), 1)
+            rosenbrock_setup().evaluation(),spotpy.objectivefunctions.rmse)
+        self.assertEqual(len(calc_like), len(self.results))
         self.assertEqual(type(calc_like), type([]))

     def test_get_best_parameterset(self):
@@ -107,8 +128,8 @@ def test_get_modelruns(self):
         get_modelruns = spotpy.analyser.get_modelruns(
             self.results
         )
-        self.assertEqual(len(get_modelruns[0][0]), 1)
-        self.assertEqual(type(get_modelruns[0][0]), np.void)
+        self.assertEqual(len(get_modelruns[0]), 1)
+        self.assertEqual(type(get_modelruns[0]), np.void)

     def test_get_header(self):
         get_header = spotpy.analyser.get_header(
@@ -118,25 +139,19 @@ def test_get_header(self):
         self.assertEqual(type(get_header), type(()))

     def test_get_min_max(self):
-        from spotpy.examples.spot_setup_ackley import spot_setup as sp_ackley
-        sp_ackley = sp_ackley()
-        get_min_max = spotpy.analyser.get_min_max(spotpy_setup=sp_ackley)
-        self.assertEqual(len(get_min_max[0]), 30)
+        get_min_max = spotpy.analyser.get_min_max(spotpy_setup=rosenbrock_setup())
+        self.assertEqual(len(get_min_max[0]), 3)
         self.assertEqual(type(get_min_max), type(()))
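The restructuring above replaces a per-test `setUp` with a single `setUpClass`: each sampler runs once and its results are cached as class attributes that every test reads. The pattern in isolation, as a minimal sketch:

```python
import unittest
import spotpy
from spotpy.examples.spot_setup_rosenbrock import spot_setup as rosenbrock_setup

class TestAnalyserSketch(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Sample once, share the results with all test methods below
        sampler = spotpy.algorithms.mc(rosenbrock_setup(), sim_timeout=5)
        sampler.sample(300)
        cls.results = sampler.getdata()

    def test_number_of_runs(self):
        self.assertEqual(len(self.results), 300)
```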
     def test_get_parbounds(self):
-        from spotpy.examples.spot_setup_ackley import spot_setup as sp_ackley
-        sp_ackley = sp_ackley()
-        get_parbounds = spotpy.analyser.get_parbounds(spotpy_setup=sp_ackley)
-
+        get_parbounds = spotpy.analyser.get_parbounds(spotpy_setup=rosenbrock_setup())
         self.assertEqual(len(get_parbounds[0]), 2)
-        self.assertEqual(len(get_parbounds), 30)
+        self.assertEqual(len(get_parbounds), 3)
         self.assertEqual(type(get_parbounds), type([]))

     def test_get_percentiles(self):
         get_percentiles = spotpy.analyser.get_percentiles(
-            self.results
-        )
+            self.results)
         self.assertEqual(len(get_percentiles),5)
         self.assertEqual(type(get_percentiles[0][0]), type(np.abs(-1.0)))
         self.assertEqual(type(get_percentiles),type(()))
@@ -153,348 +168,216 @@ def test__geweke(self):
     def test_plot_Geweke(self):
         sample1 = []
-        for a in self.results[0]:
+        for a in self.results:
             sample1.append(a[0])

         spotpy.analyser.plot_Geweke(sample1,"sample1")
-        fig_name = "test_plot_Geweke.png"
-        plt.savefig(fig_name)
+        plt.savefig(self.fig_name)

         # approximately 8855 bytes is the size of an empty matplotlib.pyplot.plot, so
-        # we expecting a plot with some content without testing the structure of the pot just
+        # we expect a plot with some content, without testing the structure of the plot, just
         # the size
-        self.assertGreaterEqual(os.path.getsize(fig_name), 8855)
-        os.remove(fig_name)
+        self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)

     def test_get_sensitivity_of_fast(self):
-        from spotpy.examples.spot_setup_rosenbrock import spot_setup
-        spot_setup_object = spot_setup()
-        results = []
-        parallel = "seq"
-        dbformat = "ram"
-        timeout = 5
-        self.sampler = spotpy.algorithms.fast(spot_setup_object, parallel=parallel,
-                                              dbname='test_get_sensitivity_of_fast', dbformat=dbformat,
-                                              sim_timeout=timeout)
-        self.sampler.sample(200)
-        results.append(self.sampler.getdata())
-        results = np.array(results)[0]
-        get_sensitivity_of_fast = spotpy.analyser.get_sensitivity_of_fast(results)
-        self.assertEqual(len(get_sensitivity_of_fast), 2)
-        self.assertEqual(len(get_sensitivity_of_fast["S1"]), 3)
-        self.assertEqual(len(get_sensitivity_of_fast["ST"]), 3)
-        self.assertEqual(type(get_sensitivity_of_fast), type({}))
+        if sys.version_info >= (3, 6):
+            get_sensitivity_of_fast = spotpy.analyser.get_sensitivity_of_fast(self.sens_results)
+            self.assertEqual(len(get_sensitivity_of_fast), 2)
+            self.assertEqual(len(get_sensitivity_of_fast["S1"]), 3)
+            self.assertEqual(len(get_sensitivity_of_fast["ST"]), 3)
+            self.assertEqual(type(get_sensitivity_of_fast), type({}))

     def test_get_simulation_fields(self):
         get_simulation_fields = spotpy.analyser.get_simulation_fields(
-            self.results
-        )
+            self.results)
         self.assertEqual(['simulation_0'],get_simulation_fields)

     def test_compare_different_objectivefunctions(self):
-        from spotpy.examples.spot_setup_hymod_python import spot_setup as sp
-        sp = sp()
-        sampler = spotpy.algorithms.dream(sp, parallel="seq", dbname='test_compare_different_objectivefunctions',
-                                          dbformat="ram",
-                                          sim_timeout=5)
+        sampler = spotpy.algorithms.lhs(rosenbrock_setup(),
+                                        sim_timeout=self.timeout)

-        sampler_mcmc = spotpy.algorithms.mcmc(sp, parallel="seq", dbname='test_compare_different_objectivefunctions',
-                                              dbformat="ram",
-                                              sim_timeout=5)
-
-        sampler.sample(50)
-        sampler_mcmc.sample(50)
+        sampler.sample(self.rep)
         compare_different_objectivefunctions = spotpy.analyser.compare_different_objectivefunctions(
-            sampler_mcmc.bestlike, sampler.bestlike)
+            sampler.getdata()['like1'], self.results['like1'])
         self.assertEqual(type(compare_different_objectivefunctions[1]),type(np.array([0.5])[0]))
     def test_plot_parameter_uncertainty(self):
-        posterior = spotpy.analyser.get_posterior(self.results,percentage=10)
-        self.assertAlmostEqual(len(posterior)/100, self.rep/1000, 1)
-        self.assertEqual(type(posterior), type(np.array([])))
-        spotpy.analyser.plot_parameter_uncertainty(posterior,
-                                                   self.sampler.evaluation)
-
-        fig_name = "test_plot_parameter_uncertainty.png"
-        plt.savefig(fig_name)
+        if sys.version_info >= (3, 6):
+            posterior = spotpy.analyser.get_posterior(self.hymod_results,percentage=10)
+            self.assertAlmostEqual(len(posterior), self.rep*0.1, 2)
+            self.assertEqual(type(posterior), type(np.array([])))
+            spotpy.analyser.plot_parameter_uncertainty(posterior,
+                                                       hymod_setup().evaluation(),
+                                                       fig_name=self.fig_name)
+
+            # approximately 8855 bytes is the size of an empty matplotlib.pyplot.plot, so
+            # we expect a plot with some content, without testing the structure of the plot, just
+            # the size
+            self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)

-        # approximately 8855 KB is the size of an empty matplotlib.pyplot.plot, so
-        # we expecting a plot with some content without testing the structure of the pot just
-        # the size
-        self.assertGreaterEqual(os.path.getsize(fig_name), 8855)
-        # tidy up all
-        os.remove(fig_name)

     def test_plot_fast_sensitivity(self):
-        from spotpy.examples.spot_setup_rosenbrock import spot_setup
-        spot_setup_object = spot_setup()
-        parallel = "seq"
-        dbformat = "ram"
-        timeout = 5
-
-        sampler = spotpy.algorithms.fast(spot_setup_object, parallel=parallel,
-                                         dbname='test_get_sensitivity_of_fast', dbformat=dbformat,
-                                         sim_timeout=timeout)
-        sampler.sample(300)
-        results = []
-        results.append(sampler.getdata())
-        results = np.array(results)[0]
-        print("Sampler is done with")
-        spotpy.analyser.plot_fast_sensitivity(results)
-
-        fig_name = "FAST_sensitivity.png"
-
-        # approximately 8855 KB is the size of an empty matplotlib.pyplot.plot, so
-        # we expecting a plot with some content without testing the structure of the pot just
-        # the size
-        self.assertGreaterEqual(os.path.getsize(fig_name), 8855)
-        # tidy up all
-        os.remove(fig_name)
-
-
-    def setup_griewank(self):
-        if not os.path.isfile("setup_griewank_pickle"):
-            from spotpy.examples.spot_setup_griewank import spot_setup
-            # Create samplers for every algorithm:
-            spot_setup = spot_setup()
-            rep = 100
-            timeout = 10  # Given in Seconds
-
-            parallel = "seq"
-            dbformat = "csv"
-
-            sampler = spotpy.algorithms.mc(spot_setup, parallel=parallel, dbname='RosenMC', dbformat=dbformat,
-                                           sim_timeout=timeout)
-            sampler.sample(rep)
-
-            fl = open("setup_griewank_pickle", "wb")
-            pickle.dump({"getdata": sampler.getdata(), "evaluation": sampler.evaluation}, fl)
-            fl.close()
-
-        with open("setup_griewank_pickle", "rb") as file:
-            return pickle.load(file)
-
+        if sys.version_info >= (3, 6):
+            spotpy.analyser.plot_fast_sensitivity(self.sens_results,fig_name=self.fig_name)
+            # approximately 8855 bytes is the size of an empty matplotlib.pyplot.plot, so
+            # we expect a plot with some content, without testing the structure of the plot, just
+            # the size
+            self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)

     def test_plot_heatmap_griewank(self):
-        fig_name = "test.png"
-        spotpy.analyser.plot_heatmap_griewank([self.setup_griewank()["getdata"]],["test"])
+        spotpy.analyser.plot_heatmap_griewank([self.griewank_results],["test"],
+                                              fig_name=self.fig_name)

         # approximately 8855 bytes is the size of an empty matplotlib.pyplot.plot, so
-        # we expecting a plot with some content without testing the structure of the pot just
+        # we expect a plot with some content, without testing the structure of the plot, just
         # the size
-        self.assertGreaterEqual(os.path.getsize(fig_name), 8855)
-        os.remove(fig_name)
+        self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)
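All of these plot tests share one heuristic: an empty matplotlib figure saved as PNG is roughly 8855 bytes, so any file at least that large is assumed to contain an actual plot. A small helper expressing that idea; the name `assert_plot_has_content` is ours, not part of the test suite:

```python
import os

EMPTY_PLOT_SIZE = 8855  # approximate size in bytes of an empty matplotlib PNG

def assert_plot_has_content(testcase, fig_name):
    """Heuristic check: the saved figure is larger than an empty plot."""
    testcase.assertGreaterEqual(os.path.getsize(fig_name), EMPTY_PLOT_SIZE)
```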
     def test_plot_objectivefunction(self):
-        fig_name = "test_plot_objectivefunction.png"
-        spotpy.analyser.plot_objectivefunction(self.results
-                                               , self.sampler.evaluation)
-        plt.savefig(fig_name)
+        spotpy.analyser.plot_objectivefunction(self.results,
+                                               rosenbrock_setup().evaluation(),
+                                               fig_name=self.fig_name)
+
         # approximately 8855 bytes is the size of an empty matplotlib.pyplot.plot, so
-        # we expecting a plot with some content without testing the structure of the pot just
+        # we expect a plot with some content, without testing the structure of the plot, just
         # the size
-        self.assertGreaterEqual(os.path.getsize(fig_name), 8855)
-        os.remove(fig_name)
+        self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)

     def test_plot_parametertrace_algorithms(self):
-        spotpy.analyser.plot_parametertrace_algorithms([self.setup_griewank()["getdata"]],["test_plot_parametertrace_algorithms"])
-        fig_name = "test2.png"
-
+        spotpy.analyser.plot_parametertrace_algorithms([self.griewank_results],
+                                                       ["test_plot_parametertrace_algorithms"],
+                                                       spot_setup=griewank_setup(),
+                                                       fig_name=self.fig_name)
         # approximately 8855 bytes is the size of an empty matplotlib.pyplot.plot, so
-        # we expecting a plot with some content without testing the structure of the pot just
+        # we expect a plot with some content, without testing the structure of the plot, just
         # the size
-        self.assertGreaterEqual(os.path.getsize(fig_name), 8855)
-        os.remove(fig_name)
+        self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)
+        os.remove(self.fig_name)

     def test_plot_parametertrace(self):
-        spotpy.analyser.plot_parametertrace(self.setup_griewank()["getdata"], ["0","1"])
-        fig_name = "0_1__trace.png"
+        spotpy.analyser.plot_parametertrace(self.griewank_results, ["0","1"], fig_name=self.fig_name)

         # approximately 8855 bytes is the size of an empty matplotlib.pyplot.plot, so
-        # we expecting a plot with some content without testing the structure of the pot just
+        # we expect a plot with some content, without testing the structure of the plot, just
         # the size
-        self.assertGreaterEqual(os.path.getsize(fig_name), 8855)
-        os.remove(fig_name)
+        self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)

     def test_plot_posterior_parametertrace(self):
-        spotpy.analyser.plot_posterior_parametertrace(self.setup_griewank()["getdata"], ["0","1"])
-        fig_name = "0_1__trace.png"
-
+        spotpy.analyser.plot_posterior_parametertrace(self.griewank_results, ["0","1"], fig_name=self.fig_name)
+
         # approximately 8855 bytes is the size of an empty matplotlib.pyplot.plot, so
-        # we expecting a plot with some content without testing the structure of the pot just
+        # we expect a plot with some content, without testing the structure of the plot, just
         # the size
-        self.assertGreaterEqual(os.path.getsize(fig_name), 8855)
-        os.remove(fig_name)
-    def test_plot_posterior(self):
-        spotpy.analyser.plot_posterior(self.results[0]
-                                       , self.sampler.evaluation)
-        fig_name = "bestmodelrun.png"
-
-        # approximately 8855 KB is the size of an empty matplotlib.pyplot.plot, so
-        # we expecting a plot with some content without testing the structure of the pot just
-        # the size
-        self.assertGreaterEqual(os.path.getsize(fig_name), 8855)
-        os.remove(fig_name)
+    def test_plot_posterior(self):
+        if sys.version_info >= (3, 6):
+            spotpy.analyser.plot_posterior(self.hymod_results
+                                           , hymod_setup().evaluation(),fig_name=self.fig_name)
+
+            # approximately 8855 bytes is the size of an empty matplotlib.pyplot.plot, so
+            # we expect a plot with some content, without testing the structure of the plot, just
+            # the size
+            self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)

     def test_plot_bestmodelrun(self):
-        samp = self.setup_griewank()
-        spotpy.analyser.plot_bestmodelrun(samp["getdata"], samp["evaluation"])
-        fig_name="Best_model_run.png"
+        spotpy.analyser.plot_bestmodelrun(self.griewank_results,
+                                          griewank_setup().evaluation(), fig_name=self.fig_name)

         # approximately 8855 bytes is the size of an empty matplotlib.pyplot.plot, so
-        # we expecting a plot with some content without testing the structure of the pot just
+        # we expect a plot with some content, without testing the structure of the plot, just
         # the size
-        self.assertGreaterEqual(os.path.getsize(fig_name), 8855)
-        os.remove(fig_name)
+        self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)
+        os.remove(self.fig_name)

     def test_plot_bestmodelruns(self):
-        spotpy.analyser.plot_bestmodelruns(
-            self.results, self.sampler.evaluation,
-            dates=range(1, 1+len(self.sampler.evaluation)), algorithms=["test"])
-        fig_name = "bestmodelrun.png"
+        if sys.version_info >= (3, 6):
+            spotpy.analyser.plot_bestmodelruns(
+                [self.hymod_results[0:10],self.hymod_results[10:20]], hymod_setup().evaluation(),
+                dates=range(1, 1+len(hymod_setup().evaluation())), algorithms=["test", "test2"],
+                fig_name=self.fig_name)

-        # approximately 8855 KB is the size of an empty matplotlib.pyplot.plot, so
-        # we expecting a plot with some content without testing the structure of the pot just
-        # the size
-        self.assertGreaterEqual(os.path.getsize(fig_name), 8855)
+            # approximately 8855 bytes is the size of an empty matplotlib.pyplot.plot, so
+            # we expect a plot with some content, without testing the structure of the plot, just
+            # the size
+            self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)

     def test_plot_objectivefunctiontraces(self):
-        spotpy.analyser.plot_objectivefunctiontraces(self.results
-                                                     , self.sampler.evaluation
-                                                     , ["test"])
-        fig_name="Like_trace.png"
+        spotpy.analyser.plot_objectivefunctiontraces([self.results],
+                                                     [rosenbrock_setup().evaluation()],
+                                                     ["test"],fig_name=self.fig_name)

         # approximately 8855 bytes is the size of an empty matplotlib.pyplot.plot, so
-        # we expecting a plot with some content without testing the structure of the pot just
+        # we expect a plot with some content, without testing the structure of the plot, just
         # the size
-        self.assertGreaterEqual(os.path.getsize(fig_name), 8855)
-        os.remove(fig_name)
     def test_plot_regression(self):
-        from spotpy.examples.spot_setup_rosenbrock import spot_setup
-        spot_setup_object = spot_setup()
-        parallel = "mpc"
-        dbformat = "ram"
-        timeout = 5
-        sampler = spotpy.algorithms.mc(spot_setup_object, parallel=parallel,
-                                       dbname='test_plot_regression', dbformat=dbformat,
-                                       sim_timeout=timeout)
-        sampler.sample(300)
-
-        spotpy.analyser.plot_regression(sampler.getdata(), sampler.evaluation)
-        fig_name="regressionanalysis.png"
+        spotpy.analyser.plot_regression(self.results, rosenbrock_setup().evaluation(),
+                                        fig_name=self.fig_name)

         # approximately 8855 bytes is the size of an empty matplotlib.pyplot.plot, so
-        # we expecting a plot with some content without testing the structure of the pot just
+        # we expect a plot with some content, without testing the structure of the plot, just
         # the size
-        self.assertGreaterEqual(os.path.getsize(fig_name), 8855)
-        os.remove(fig_name)
-
+        self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)

     def test_plot_parameterInteraction(self):
         # Test only under Python 3.6 as lower versions result in a strange ValueError
         if sys.version_info >= (3, 6):
-            self.setup_MC_results()
-            spotpy.analyser.plot_parameterInteraction(pickle.load(open("test_analyser_MC_results","rb")))
-            fig_name = "ParameterInteraction.png"
-
+            spotpy.analyser.plot_parameterInteraction(self.results,
+                                                      fig_name = self.fig_name)
+
             # approximately 8855 bytes is the size of an empty matplotlib.pyplot.plot, so
-            # we expecting a plot with some content without testing the structure of the pot just
+            # we expect a plot with some content, without testing the structure of the plot, just
             # the size
-            self.assertGreaterEqual(os.path.getsize(fig_name), 8855)
-            os.remove(fig_name)
+            self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)

     def test_plot_allmodelruns(self):
-        from spotpy.examples.spot_setup_hymod_python import spot_setup as sp
-        sp = sp()
-
-        sampler = spotpy.algorithms.dream(sp, parallel="seq", dbname='test_plot_allmodelruns',
-                                          dbformat="ram",
-                                          sim_timeout=5)
-
-        sampler.sample(50)
-
-        modelruns = []
-        for run in sampler.getdata():
-            on_run = []
-            for i in run:
-                on_run.append(i)
-            on_run = np.array(on_run)[:-7]
-            modelruns.append(on_run.tolist())
-
-        test_plot_allmodelruns = spotpy.analyser.plot_allmodelruns(modelruns, sp.evaluation(),
-                                                                   dates=range(1, len(sp.evaluation()) + 1))
-
-        fig_name = "bestmodel.png"
-
-        # approximately 8855 KB is the size of an empty matplotlib.pyplot.plot, so
-        # we expecting a plot with some content without testing the structure of the pot just
-        # the size
-        self.assertGreaterEqual(os.path.getsize(fig_name), 8855)
-        os.remove(fig_name)
+        if sys.version_info >= (3, 6):
+            modelruns = []
+            for run in self.hymod_results:
+                on_run = []
+                for i in run:
+                    on_run.append(i)
+                on_run = np.array(on_run)[:-7]
+                modelruns.append(on_run.tolist())
+            spotpy.analyser.plot_allmodelruns(modelruns, hymod_setup().evaluation(),
+                                              dates=range(1, len(hymod_setup().evaluation()) + 1),
+                                              fig_name=self.fig_name)
+
+            # approximately 8855 bytes is the size of an empty matplotlib.pyplot.plot, so
+            # we expect a plot with some content, without testing the structure of the plot, just
+            # the size
+            self.assertGreaterEqual(os.path.getsize(self.fig_name), 8855)

     def test_plot_autocorellation(self):
-        self.setup_MC_results()
-
-        results = []
-        results.append(pickle.load(open("test_analyser_MC_results","rb")))
-        results = np.array(results)
-
-        spotpy.analyser.plot_autocorellation(results["parcmax"][0],"parcmax")
-
-        fig_name="Autocorellationparcmax.png"
+        spotpy.analyser.plot_autocorellation(self.results["parx"],"parx", fig_name=self.fig_name)

         # approximately 8855 bytes is the size of an empty matplotlib.pyplot.plot, so
-        # we expecting a plot with some content without testing the structure of the pot just
+        # we expect a plot with some content, without testing the structure of the plot, just
         # the size
-        self.assertGreaterEqual(os.path.getsize(fig_name), 8855)
-        os.remove(fig_name)
     def test_plot_gelman_rubin(self):
-        from spotpy.examples.spot_setup_hymod_python import spot_setup as sp
-        sp = sp()
-        sampler = spotpy.algorithms.dream(sp, parallel="seq", dbname='test_plot_autocorellation',
-                                          dbformat="csv",
-                                          sim_timeout=5)
-
-        r_hat = sampler.sample(100)
-
-        fig_name = "gelman_rubin.png"
-        spotpy.analyser.plot_gelman_rubin(r_hat)
-        plt.savefig(fig_name)
-        self.assertGreaterEqual(abs(os.path.getsize(fig_name)), 100)
-
-        os.remove(fig_name)
-
-    def setup_MC_results(self):
-
-        picklefilename = "test_analyser_MC_results"
-        if not os.path.isfile(picklefilename):
-            from spotpy.examples.spot_setup_hymod_python import spot_setup as sp
-            sp = sp()
+        if sys.version_info >= (3, 6):
+            spotpy.analyser.plot_gelman_rubin(self.r_hat, fig_name=self.fig_name)
+            self.assertGreaterEqual(abs(os.path.getsize(self.fig_name)), 100)

-            sampler = spotpy.algorithms.mc(sp, parallel="seq", dbname='test_plot_autocorellation',
-                                           dbformat="csv",
-                                           sim_timeout=5)
-            sampler.sample(100)
-            pickfil = open(picklefilename, "wb")
-            pickle.dump(sampler.getdata(), pickfil)

     @classmethod
     def tearDownClass(cls):
         try:
-            os.remove("RosenMC.csv")
-            os.remove("setup_griewank_pickle")
-            os.remove("test_plot_autocorellation.csv")
-            os.remove("test_analyser_MC_results")
-            os.remove("Posteriot_parameter_uncertainty.png")
-        except FileNotFoundError:
+            os.remove('test_output.png')
+        except (FileNotFoundError, IOError):
             pass
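The `r_hat` values plotted in `test_plot_gelman_rubin` come straight from the DREAM sampler: its `sample()` call returns the Gelman-Rubin diagnostic that `plot_gelman_rubin` draws against the customary 1.2 threshold line. A minimal end-to-end sketch; the repetition count and chain number are illustrative only:

```python
import spotpy
from spotpy.examples.spot_setup_hymod_python import spot_setup

setup = spot_setup(_used_algorithm='dream')
sampler = spotpy.algorithms.dream(setup, dbname='DREAM_hymod', dbformat='ram')

# DREAM returns the R_hat evolution of its chains
r_hat = sampler.sample(2000, nChains=7)

# Chains are commonly considered converged once R_hat drops below ~1.2,
# which is the dashed line plot_gelman_rubin draws
spotpy.analyser.plot_gelman_rubin(r_hat, fig_name='gelman_rubin.png')
```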