Best likelihood increasing (positive value) instead of decreasing #200

Merged 6 commits on Jan 28, 2019
1 change: 1 addition & 0 deletions .gitignore
@@ -9,6 +9,7 @@ __pycache__/
#other files
*.out
*.in
*.png
site/*
mkdocs.yml

12 changes: 6 additions & 6 deletions spotpy/algorithms/_algorithm.py
@@ -323,17 +323,17 @@ def update_params(self, params):
def postprocessing(self, rep, params, simulation, chains=1, save_run=True, negativlike=False): # TODO: rep not necessaray

params = self.update_params(params)
like = self.getfitness(simulation=simulation, params=params)
if negativlike is True:
like = -self.getfitness(simulation=simulation, params=params)
else:
like = self.getfitness(simulation=simulation, params=params)

self.status(like, params)
# Save everything in the database, if save is True
# This is needed as some algorithms just want to know the fitness,
# before they actually save the run in a database (e.g. sce-ua)
if save_run is True and simulation is not None:
print('saving')
if negativlike is True:
self.save(-like, params, simulations=simulation, chains=chains)
else:
self.save(like, params, simulations=simulation, chains=chains)
self.save(like, params, simulations=simulation, chains=chains)
if type(like)==type([]):
return like[0]
else:
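The postprocessing change above flips the sign of the fitness once, up front, when negativlike is True, instead of negating it again at save time; the same (already flipped) value is then returned and written to the database. A minimal standalone sketch of that sign handling (postprocess_fitness is a hypothetical helper for illustration, not spotpy's API):

def postprocess_fitness(fitness, negativlike=False):
    # Illustrative mirror of the logic added above: negate once when a
    # maximizing likelihood has to be fed to a minimizing algorithm.
    return -fitness if negativlike else fitness

# A likelihood of 0.87 that should be maximized becomes -0.87 for a minimizer:
print(postprocess_fitness(0.87, negativlike=True))   # -0.87
print(postprocess_fitness(0.87, negativlike=False))  # 0.87
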
5 changes: 3 additions & 2 deletions spotpy/algorithms/abc.py
@@ -103,7 +103,7 @@ def sample(self, repetitions, eb=48, a=(1 / 10), peps=0.0001, ownlimit=False, li
work.append([like, randompar, like, randompar, c, p])
icall +=1
if self.status.stop:
print('Stopping samplig')
print('Stopping sampling')
break

while icall < repetitions and gnrng > peps:
@@ -205,5 +205,6 @@ def sample(self, repetitions, eb=48, a=(1 / 10), peps=0.0001, ownlimit=False, li

if gnrng < peps:
print(
'THE POPULATION HAS CONVERGED TO A PRESPECIFIED SMALL PARAMETER SPACE')
'THE POPULATION HAS CONVERGED TO A PRESPECIFIED SMALL PARAMETER SPACE AT RUN')
print(icall)
self.final_call()
12 changes: 2 additions & 10 deletions spotpy/algorithms/sceua.py
@@ -65,13 +65,6 @@ def __init__(self, *args, **kwargs):
kwargs['alt_objfun'] = 'rmse'
super(sceua, self).__init__(*args, **kwargs)

def find_min_max(self):
randompar = self.parameter()['random']
for i in range(1000):
randompar = np.column_stack(
(randompar, self.parameter()['random']))
return np.amin(randompar, axis=1), np.amax(randompar, axis=1)

def simulate(self, id_params_tuple):
"""This overwrites the simple wrapper function of _algorithms.py
and makes a two phase mpi parallelization possbile:
@@ -192,11 +185,10 @@ def sample(self, repetitions, ngs=20, kstop=100, pcento=0.0000001, peps=0.000000
param_generator = ((rep, x[rep]) for rep in range(int(npt)))
for rep, randompar, simulations in self.repeat(param_generator):
# Calculate the objective function
like = self.postprocessing(icall, randompar, simulations, negativlike=True)
like = self.postprocessing(icall, randompar, simulations,chains=0)
xf[rep] = like
icall += 1
if self.status.stop:
#icall = repetitions
print('Stopping samplig')
break
# Sort the population in order of increasing function values;
@@ -258,7 +250,7 @@ def sample(self, repetitions, ngs=20, kstop=100, pcento=0.0000001, peps=0.000000
x[k2, :] = cx[k1, :]
xf[k2] = cf[k1]
for i in range(len(likes)):
like = self.postprocessing(icall+i, pars[i], sims[i], chains=i, negativlike=True)
like = self.postprocessing(icall+i, pars[i], sims[i], chains=i+1)
if self.status.stop:
icall = repetitions
print('Stopping samplig')
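With the sceua changes above, the initial burn-in runs are stored under chain 0 and the complex-evolution runs under chains 1..n, so the burn-in can later be dropped by filtering on the chain column. A small sketch, assuming a SCEUA_hymod.csv database produced by the tutorial further down already exists:

import spotpy

# Load the csv database written by the SCE-UA tutorial and drop the
# burn-in runs, which are stored under chain 0 after this PR.
results = spotpy.analyser.load_csv_results('SCEUA_hymod')
evolution_runs = results[results['chain'] > 0]
print(len(results), len(evolution_runs))
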
2 changes: 1 addition & 1 deletion spotpy/analyser.py
@@ -1016,7 +1016,7 @@ def plot_autocorellation(parameterdistribution,parametername):
cnames=list(colors.cnames)
fig=plt.figure(figsize=(16,9))
ax = plt.subplot(1,1,1)
pd.tools.plotting.autocorrelation_plot(parameterdistribution)
pd.plotting.autocorrelation_plot(parameterdistribution)
plt.savefig('Autocorellation'+str(parametername),dpi=300)


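The analyser fix above swaps the deprecated pd.tools.plotting path for pd.plotting, which is where autocorrelation_plot lives in current pandas releases. A quick standalone check of the updated call (assumes pandas and matplotlib are installed; the output file name is illustrative):

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Autocorrelation plot of a random series via the updated pandas call path.
series = pd.Series(np.random.normal(size=200))
pd.plotting.autocorrelation_plot(series)
plt.savefig('autocorrelation_check.png', dpi=300)
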
2 changes: 1 addition & 1 deletion spotpy/examples/spot_setup_hymod_exe.py
@@ -98,5 +98,5 @@ def evaluation(self):
return self.evals

def objectivefunction(self,simulation,evaluation, params=None):
like = spotpy.objectivefunctions.nashsutcliffe(evaluation,simulation) # Works good
like = spotpy.objectivefunctions.nashsutcliffe(evaluation,simulation) # Works good for dream sampler
return like
25 changes: 14 additions & 11 deletions spotpy/examples/spot_setup_hymod_python.py
@@ -17,7 +17,16 @@
import os

class spot_setup(object):
def __init__(self):
cmax = spotpy.parameter.Uniform(low=1.0 , high=500, optguess=412.33)
bexp = spotpy.parameter.Uniform(low=0.1 , high=2.0, optguess=0.1725)
alpha = spotpy.parameter.Uniform(low=0.1 , high=0.99, optguess=0.8127)
Ks = spotpy.parameter.Uniform(low=0.0 , high=0.10, optguess=0.0404)
Kq = spotpy.parameter.Uniform(low=0.1 , high=0.99, optguess=0.5592)
#fake1 =spotpy.parameter.Uniform(low=0.1 , high=10, optguess=0.5592)
#fake2 =spotpy.parameter.Uniform(low=0.1 , high=10, optguess=0.5592)

def __init__(self, _used_algorithm = 'default'):
self._used_algorithm = _used_algorithm
#Transform [mm/day] into [l s-1], where 1.783 is the catchment area
self.Factor = 1.783 * 1000 * 1000 / (60 * 60 * 24)
#Load Observation data from file
@@ -41,16 +50,7 @@ def __init__(self):
self.trueObs.append(float(values[3]))

climatefile.close()
self.params = [spotpy.parameter.Uniform('cmax',low=1.0 , high=500, optguess=412.33),
spotpy.parameter.Uniform('bexp',low=0.1 , high=2.0, optguess=0.1725),
spotpy.parameter.Uniform('alpha',low=0.1 , high=0.99, optguess=0.8127),
spotpy.parameter.Uniform('Ks',low=0.0 , high=0.10, optguess=0.0404),
spotpy.parameter.Uniform('Kq',low=0.1 , high=0.99, optguess=0.5592),
spotpy.parameter.Uniform('fake1',low=0.1 , high=10, optguess=0.5592),
spotpy.parameter.Uniform('fake2',low=0.1 , high=10, optguess=0.5592)]

def parameters(self):
return spotpy.parameter.generate(self.params)

def simulation(self,x):
data = hymod(self.Precip, self.PET, x[0], x[1], x[2], x[3], x[4])
@@ -63,5 +63,8 @@ def evaluation(self):
return self.trueObs[366:]

def objectivefunction(self,simulation,evaluation, params=None):
like = spotpy.likelihoods.gaussianLikelihoodMeasErrorOut(evaluation,simulation)
if self._used_algorithm == 'sceua':
like = spotpy.objectivefunctions.rmse(evaluation,simulation)
else:
like = spotpy.likelihoods.gaussianLikelihoodMeasErrorOut(evaluation,simulation)
return like
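The setup class now picks its objective function from the constructor argument: RMSE (to be minimized) for SCE-UA, and a Gaussian likelihood otherwise. A short usage sketch, assuming the hymod example data shipped with spotpy is available:

from spotpy.examples.spot_setup_hymod_python import spot_setup

# objectivefunction returns RMSE, suited to the minimizing SCE-UA sampler
setup_sceua = spot_setup(_used_algorithm='sceua')

# any other value falls back to the Gaussian likelihood, e.g. for DREAM
setup_dream = spot_setup(_used_algorithm='dream')
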
2 changes: 1 addition & 1 deletion spotpy/examples/spot_setup_rosenbrock.py
@@ -34,5 +34,5 @@ def evaluation(self):
return observations

def objectivefunction(self, simulation, evaluation):
objectivefunction = -rmse(evaluation=evaluation, simulation=simulation)
objectivefunction = rmse(evaluation=evaluation, simulation=simulation)
return objectivefunction
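The Rosenbrock example now returns the plain RMSE (a non-negative value to be minimized) instead of its negative, in line with the sceua changes above, which no longer flip the sign of the objective. A tiny check of the call, assuming spotpy is installed (the data values are illustrative):

from spotpy.objectivefunctions import rmse

evaluation = [1.0, 2.0, 3.0]
simulation = [1.1, 1.9, 3.2]
# RMSE is >= 0 and smaller is better, so a minimizer can use it directly.
print(rmse(evaluation=evaluation, simulation=simulation))  # ~0.14
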
187 changes: 146 additions & 41 deletions spotpy/examples/tutorial_sceua_hymod.py
@@ -10,59 +10,164 @@

import numpy as np
import spotpy
from spotpy.examples.spot_setup_hymod_exe import spot_setup
#from spotpy.examples.spot_setup_hymod_python import spot_setup
from spotpy.examples.spot_setup_hymod_python import spot_setup
import pylab as plt


if __name__ == "__main__":
parallel ='seq'
# Initialize the Hymod example (will only work on Windows systems)
#spot_setup=spot_setup(parallel=parallel)
spot_setup=spot_setup()
parallel ='seq' # Runs everthing in sequential mode
np.random.seed(2000) # Makes the results reproduceable

# Create the Dream sampler of spotpy, al_objfun is set to None to force SPOTPY
# Initialize the Hymod example
# In this case, we tell the setup which algorithm we want to use, so
# we can use this exmaple for different algorithms
spot_setup=spot_setup(_used_algorithm='sceua')

#Select number of maximum allowed repetitions
rep=10000

# Create the SCE-UA sampler of spotpy, alt_objfun is set to None to force SPOTPY
# to jump into the def objectivefunction in the spot_setup class (default is
# spotpy.objectivefunctions.log_p)
# spotpy.objectivefunctions.rmse)
sampler=spotpy.algorithms.sceua(spot_setup, dbname='SCEUA_hymod', dbformat='csv', alt_objfun=None)

#Select number of maximum repetitions
rep=20
#Start the sampler, one can specify ngs, kstop, peps and pcento id desired
sampler.sample(rep,ngs=10, kstop=50, peps=0.1, pcento=0.1)

# Select five chains and set the Gelman-Rubin convergence limit
nChains = 4
convergence_limit = 1.2
runs_after_convergence = 100
np.random.seed(42)
sampler=spotpy.algorithms.sceua(spot_setup, dbname='SCEUA_hymod', dbformat='csv', alt_objfun=None)
sampler.sample(rep)
# Load the results gained with the sceua sampler, stored in SCEUA_hymod.csv
results = spotpy.analyser.load_csv_results('SCEUA_hymod')

fig= plt.figure(1,figsize=(9,5))
plt.plot(results['like1'])
plt.show()
plt.ylabel('RMSE')
plt.xlabel('Iteration')
fig.savefig('hymod_objectivefunction.png',dpi=300)

# Example plot to show the parameter distribution ######
fig= plt.figure(2,figsize=(9,9))
normed_value = 1

plt.subplot(5,2,1)
x = results['parcmax']
for i in range(int(max(results['chain'])-1)):
index=np.where(results['chain']==i+1) #Ignores burn-in chain
plt.plot(x[index],'.')
plt.ylabel('cmax')
plt.ylim(spot_setup.cmax.minbound, spot_setup.cmax.maxbound)

# Load the results gained with the dream sampler, stored in DREAM_hymod.csv
results = spotpy.analyser.load_csv_results('SCEUA_hymod')
print(results['parcmax'][0:10])
# Get fields with simulation data
fields=[word for word in results.dtype.names if word.startswith('sim')]


# Example plot to show remaining parameter uncertainty #
fig= plt.figure(figsize=(16,9))
ax = plt.subplot(1,1,1)
q5,q25,q75,q95=[],[],[],[]
for field in fields:
q5.append(np.percentile(results[field][-100:-1],2.5))
q95.append(np.percentile(results[field][-100:-1],97.5))
#ax.plot(q5,color='dimgrey',linestyle='solid')
#ax.plot(q95,color='dimgrey',linestyle='solid')
#ax.fill_between(np.arange(0,len(q5),1),list(q5),list(q95),facecolor='dimgrey',zorder=0,
# linewidth=0,label='parameter uncertainty')
ax.plot(spot_setup.evaluation(),'r.',label='data')
ax.set_ylim(-50,450)
ax.set_xlim(0,729)
ax.legend()
fig.savefig('python_hymod.png',dpi=300)
#########################################################

plt.subplot(5,2,2)
x = x[int(len(results)*0.9):] #choose the last 10% of the sample
hist, bins = np.histogram(x, bins=20, density=True)
widths = np.diff(bins)
hist *= normed_value
plt.bar(bins[:-1], hist, widths)
plt.ylabel('cmax')
plt.xlim(spot_setup.cmax.minbound, spot_setup.cmax.maxbound)


plt.subplot(5,2,3)
x = results['parbexp']
for i in range(int(max(results['chain'])-1)):
index=np.where(results['chain']==i+1)
plt.plot(x[index],'.')
plt.ylabel('bexp')
plt.ylim(spot_setup.bexp.minbound, spot_setup.bexp.maxbound)

plt.subplot(5,2,4)
x = x[int(len(results)*0.9):]
hist, bins = np.histogram(x, bins=20, density=True)
widths = np.diff(bins)
hist *= normed_value
plt.bar(bins[:-1], hist, widths)
plt.ylabel('bexp')
plt.xlim(spot_setup.bexp.minbound, spot_setup.bexp.maxbound)



plt.subplot(5,2,5)
x = results['paralpha']
for i in range(int(max(results['chain'])-1)):
index=np.where(results['chain']==i+1)
plt.plot(x[index],'.')
plt.ylabel('alpha')
plt.ylim(spot_setup.alpha.minbound, spot_setup.alpha.maxbound)


plt.subplot(5,2,6)
x = x[int(len(results)*0.9):]
hist, bins = np.histogram(x, bins=20, density=True)
widths = np.diff(bins)
hist *= normed_value
plt.bar(bins[:-1], hist, widths)
plt.ylabel('alpha')
plt.xlim(spot_setup.alpha.minbound, spot_setup.alpha.maxbound)


plt.subplot(5,2,7)
x = results['parKs']
for i in range(int(max(results['chain'])-1)):
index=np.where(results['chain']==i+1)
plt.plot(x[index],'.')
plt.ylabel('Ks')
plt.ylim(spot_setup.Ks.minbound, spot_setup.Ks.maxbound)


plt.subplot(5,2,8)
x = x[int(len(results)*0.9):]

hist, bins = np.histogram(x, bins=20, density=True)
widths = np.diff(bins)
hist *= normed_value
plt.bar(bins[:-1], hist, widths)
plt.ylabel('Ks')
plt.xlim(spot_setup.Ks.minbound, spot_setup.Ks.maxbound)


plt.subplot(5,2,9)
x = results['parKq']
for i in range(int(max(results['chain'])-1)):
index=np.where(results['chain']==i+1)
plt.plot(x[index],'.')
plt.ylabel('Kq')
plt.ylim(spot_setup.Kq.minbound, spot_setup.Kq.maxbound)
plt.xlabel('Iterations')

plt.subplot(5,2,10)
x = x[int(len(results)*0.9):]
hist, bins = np.histogram(x, bins=20, density=True)
widths = np.diff(bins)
hist *= normed_value
plt.bar(bins[:-1], hist, widths)
plt.ylabel('Kq')
plt.xlabel('Parameter range')
plt.xlim(spot_setup.Kq.minbound, spot_setup.Kq.maxbound)
plt.show()
fig.savefig('hymod_parameters.png',dpi=300)
########################################################
# print(results['parcmax'][0:10])
# # Get fields with simulation data
# fields=[word for word in results.dtype.names if word.startswith('sim')]
#
#
# # Example plot to show remaining parameter uncertainty #
# fig= plt.figure(figsize=(16,9))
# ax = plt.subplot(1,1,1)
# q5,q25,q75,q95=[],[],[],[]
# for field in fields:
# q5.append(np.percentile(results[field][-100:-1],2.5))
# q95.append(np.percentile(results[field][-100:-1],97.5))
# #ax.plot(q5,color='dimgrey',linestyle='solid')
# #ax.plot(q95,color='dimgrey',linestyle='solid')
# #ax.fill_between(np.arange(0,len(q5),1),list(q5),list(q95),facecolor='dimgrey',zorder=0,
# # linewidth=0,label='parameter uncertainty')
# ax.plot(spot_setup.evaluation(),'r.',label='data')
# ax.set_ylim(-50,450)
# ax.set_xlim(0,729)
# ax.legend()
# fig.savefig('python_hymod.png',dpi=300)
# #########################################################



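The five trace/histogram pairs in the tutorial above all follow the same pattern. As a sketch only (not part of this PR), they could also be drawn in a loop, assuming the results array and the spot_setup instance from the tutorial are in scope; the output file name is illustrative:

import numpy as np
import pylab as plt

parameter_names = ['cmax', 'bexp', 'alpha', 'Ks', 'Kq']
fig = plt.figure(figsize=(9, 9))
for row, name in enumerate(parameter_names):
    bounds = getattr(spot_setup, name)   # the Uniform parameter object
    trace = results['par' + name]

    # Left column: parameter trace per chain (chain 0 is the burn-in)
    ax = fig.add_subplot(5, 2, 2 * row + 1)
    for i in range(int(max(results['chain']) - 1)):
        index = np.where(results['chain'] == i + 1)
        ax.plot(trace[index], '.')
    ax.set_ylabel(name)
    ax.set_ylim(bounds.minbound, bounds.maxbound)

    # Right column: histogram of the last 10% of the sample
    tail = trace[int(len(results) * 0.9):]
    hist, bins = np.histogram(tail, bins=20, density=True)
    ax = fig.add_subplot(5, 2, 2 * row + 2)
    ax.bar(bins[:-1], hist, np.diff(bins))
    ax.set_ylabel(name)
    ax.set_xlim(bounds.minbound, bounds.maxbound)
fig.savefig('hymod_parameters_loop.png', dpi=300)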