Skip to content
Snippets Groups Projects
Commit 5ca86338 authored by Stefano Serafin's avatar Stefano Serafin
Browse files

identifiability plots now display da experiment history

parent 985d7329
No related branches found
No related tags found
No related merge requests found
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
import numpy as np import numpy as np
from matplotlib import pyplot as p from matplotlib import pyplot as p
import matplotlib.colors as colors import matplotlib.colors as colors
from ENDA import experiment
def plot_CBL_assimilation(exp,figtitle,which_cycle=0,ax=None): def plot_CBL_assimilation(exp,figtitle,which_cycle=0,ax=None):
...@@ -179,12 +180,52 @@ def plot_CBL_PE(exp,figtitle,parameter_id=0,plot_spread=False,ax=None): ...@@ -179,12 +180,52 @@ def plot_CBL_PE(exp,figtitle,parameter_id=0,plot_spread=False,ax=None):
def plot_CBL_identifiability(cbl,sigma_o,figtitle,ax=None): def plot_CBL_identifiability(cbl,sigma_o,figtitle,ax=None):
zmax = 2000
ncont = 13
if isinstance(cbl,experiment):
# Read relevant dimensions
zt = cbl.da.zt
nz = cbl.da.nz
nens = cbl.da.nens
times = np.array([])
beg = 0
for i in range(len(cbl.da.history)):
times = np.append(times,cbl.da.history[i]['0000']['time']+beg)
beg = beg+(cbl.da.history[i]['0000']['time'].size-1)*cbl.nr.dt
ntimes = times.size
# Reconstruct_history
theta = np.zeros((nens,nz,ntimes))+np.nan
pfac = np.zeros((nens,ntimes))+np.nan
beg = 0
for i in range(len(cbl.da.history)):
end = cbl.da.history[i]['0000']['time'].size
for k in range(nens):
theta[k,:,beg:beg+end]= cbl.da.history[i]['%04u'%k]['theta']
pfac[k,beg:beg+end]= cbl.da.backgrounds[i,-1,k]
beg = beg+end
pfac = cbl.nr.parameter_transform[0](pfac,kind='inv')
# Compute Pearson correlation coefficient and Kalman Gain
# (a little noise is added to the denominator to avoid singularities)
p_x_correlation = np.zeros((nz,ntimes))+np.nan
kalman_gain = np.zeros((nz,ntimes))+np.nan
for i in range(nz):
for j in range(ntimes):
covmat=np.cov(pfac[:,j],theta[:,i,j])
p_x_correlation[i,j] = covmat[0,1] / (1e-9+np.sqrt(covmat[0,0]*covmat[1,1]))
kalman_gain[i,j] = covmat[0,1] / (1e-9+covmat[1,1]+sigma_o**2)
# For plotting
contours = np.linspace(cbl.nr.theta_0,cbl.nr.theta_0+cbl.nr.gamma*zmax,ncont)
else:
# Read relevant dimensions # Read relevant dimensions
times = cbl.history['0000']['time']
zt = cbl.zt zt = cbl.zt
nz = cbl.nz nz = cbl.nz
ntimes = times.size
nens = cbl.nens nens = cbl.nens
times = cbl.history['0000']['time']
ntimes = times.size
# Reconstruct_history # Reconstruct_history
theta = np.zeros((nens,nz,ntimes))+np.nan theta = np.zeros((nens,nz,ntimes))+np.nan
...@@ -197,16 +238,20 @@ def plot_CBL_identifiability(cbl,sigma_o,figtitle,ax=None): ...@@ -197,16 +238,20 @@ def plot_CBL_identifiability(cbl,sigma_o,figtitle,ax=None):
else: else:
pfac = np.ones(nens)*cbl.pfac pfac = np.ones(nens)*cbl.pfac
zmax = 2000 # Compute Pearson correlation coefficient and Kalman Gain
ncont = 13
if ax is None:
# Compute Pearson correlation coefficient
# (a little noise is added to the denominator to avoid singularities) # (a little noise is added to the denominator to avoid singularities)
p_x_correlation = np.zeros((nz,ntimes))+np.nan p_x_correlation = np.zeros((nz,ntimes))+np.nan
kalman_gain = np.zeros((nz,ntimes))+np.nan
for i in range(nz): for i in range(nz):
for j in range(ntimes): for j in range(ntimes):
covmat=np.cov(pfac,theta[:,i,j]) covmat=np.cov(pfac,theta[:,i,j])
p_x_correlation[i,j] = covmat[0,1] / (1e-9+np.sqrt(covmat[0,0]*covmat[1,1])) p_x_correlation[i,j] = covmat[0,1] / (1e-9+np.sqrt(covmat[0,0]*covmat[1,1]))
kalman_gain[i,j] = covmat[0,1] / (1e-9+covmat[1,1]+sigma_o**2)
# For plotting
contours = np.linspace(cbl.theta_0,cbl.theta_0+cbl.gamma*zmax,ncont)
if ax is None:
# Make plots # Make plots
fig = p.figure(151) fig = p.figure(151)
...@@ -245,20 +290,11 @@ def plot_CBL_identifiability(cbl,sigma_o,figtitle,ax=None): ...@@ -245,20 +290,11 @@ def plot_CBL_identifiability(cbl,sigma_o,figtitle,ax=None):
fig.savefig(figtitle,format='png',dpi=300) fig.savefig(figtitle,format='png',dpi=300)
p.close(fig) p.close(fig)
else: else:
# Compute Kalman Gain
# (a little noise is added to the denominator to avoid singularities)
kalman_gain = np.zeros((nz,ntimes))+np.nan
for i in range(nz):
for j in range(ntimes):
covmat=np.cov(pfac,theta[:,i,j])
#p_x_correlation[i,j] = covmat[0,1] / (1e-9+np.sqrt(covmat[0,0]*covmat[1,1]))
kalman_gain[i,j] = covmat[0,1] / (1e-9+covmat[1,1]+sigma_o**2)
# Make plots # Make plots
c = ax.pcolormesh(times/3600.,zt,kalman_gain,norm=colors.CenteredNorm(),cmap='RdBu_r') c = ax.pcolormesh(times/3600.,zt,kalman_gain,norm=colors.CenteredNorm(),cmap='RdBu_r')
#c = ax.pcolormesh(times/3600.,zt,kalman_gain,vmin=-5, vmax=5,cmap='RdBu_r') #c = ax.pcolormesh(times/3600.,zt,kalman_gain,vmin=-5, vmax=5,cmap='RdBu_r')
ax.contour(times/3600.,zt,theta.mean(axis=0), ax.contour(times/3600.,zt,theta.mean(axis=0),
np.linspace(cbl.theta_0,cbl.theta_0+cbl.gamma*zmax,ncont), contours,
colors='black', colors='black',
linestyles='--', linestyles='--',
linewidths=0.75) linewidths=0.75)
......
0% Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment