From 0fbead2e32d6e8d4d0aa2076959970af398b0478 Mon Sep 17 00:00:00 2001
From: Philipp Griewank <philipp.griewank@uni-koeln.de>
Date: Mon, 12 Jul 2021 11:20:13 +0200
Subject: [PATCH] Added single step forecast and analysis

The output is referred to as a quad because the function returns a background, analysis, forecast, and blind/free forecast.
---
 da_functions.py   | 86 +++++++++++++++++++++++++++++++++++++++++++++++
 plot_functions.py | 56 ++++++++++++++++++++++++++++++
 2 files changed, 142 insertions(+)

diff --git a/da_functions.py b/da_functions.py
index 119dc68..003a70e 100644
--- a/da_functions.py
+++ b/da_functions.py
@@ -773,7 +773,93 @@ def gaspari_cohn_non_mirrored(x,loc_length):
         
     return gp 
     
+def single_step_analysis_forecast(state,da_const,m_const,j,t):
+    """
+    This function takes a given background state, to which is computes an analysis for provided observations. A blind forecast is computed from the background, and a forecast is computed from the analysis. 
+    
+    The main purpose of this approach of having a little isolated timestep is to enable applying different test to the same backgound state. 
 
+    """
+    
+    """
+    constant matrices that follow from previously defined constants
+    """
+    H = np.identity(m_const["nx"])[da_const["obs_loc"],:]                              # Observation operator    
+    R = da_const["used_std_obs"]**2*np.identity(len(da_const["obs_loc"]))           # Observation error covariance matrix
+    
+
+    #Construct localization matrix C if localization is enabled; otherwise use a matrix of ones (no localization)
+    if da_const["loc"]:
+        C = loc_matrix(da_const,m_const)
+    else:
+        C=np.ones([m_const["nx"],m_const["nx"]])
+
+    """
+    Generate obs
+    """
+    #Make obs by adding noise to the truth; even with fixed_seed the seed varies with experiment (j) and time (t), so the observation errors differ between assimilation steps
+    if da_const["fixed_seed"]==True: np.random.seed((j+1)*(t+1))
+    
+    #Alternative (currently disabled): generate obs only at the observation locations, which would allow multiple observations at the same location
+    #obs = np.zeros(len(da_const['obs_loc']))
+    #for o in range(len(da_const['obs_loc'])):
+    #    obs[o] = state[j]['truth'][t][0] + np.random.normal(0,da_const["True_std_obs"])
+    
+    obs = state[j]['truth'][t] + np.random.normal(0,da_const["True_std_obs"],m_const["nx"])
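+    # obs holds one value per grid point; the observation operator H selects the entries at obs_loc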
+    
+    #Generate new truth constants and integrate in time
+    if da_const["fixed_seed"]==True: np.random.seed(j)
+    u_truth = np.random.normal(m_const["u_ref"],m_const["u_std_truth"])
+    dhdt_truth = np.random.normal(m_const["dhdt_ref"],m_const["dhdt_std_truth"])
+    truth_forecast = linear_advection_model(state[j]['truth'][t],u_truth,dhdt_truth,m_const["dx"],da_const["dt"],da_const["nt"])
+    
+
+    """
+    EnKF
+    """
+    if da_const['method'] == 'EnKF':
+        # Compute the background error covariance matrix
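+        # (the element-wise/Schur product with the localization matrix C localizes the ensemble sample covariance)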
+        P = np.cov(state[j]['bg'][t])*C
+    
+        # The relative weighting of observations and background is encoded in the Kalman gain matrix
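+        # (KalmanGain presumably implements the standard gain K = P H^T (H P H^T + R)^-1)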
+        K = KalmanGain(P, R, H)
+    
+        # Compute the analysis for each ensemble members
+        an = np.zeros((m_const["nx"],da_const["nens"]))
+        for i in range(da_const["nens"]):
+            an[:,i] = get_analysis(state[j]['bg'][t][:,i],obs,K,H,da_const)
+    """
+    LETKF
+    """
+    if da_const['method'] == 'LETKF':
+        # note: this uses the observations stored in state, not the obs generated above
+        an, _ = LETKF_analysis(state[j]['bg'][t],state[j]['obs'][t],m_const,da_const)
+    
+
+    """
+    Predict blind forecast and forecast
+    """
+    bf = np.zeros((m_const["nx"],da_const["nens"]))
+    fc = np.zeros((m_const["nx"],da_const["nens"]))
+    for i in range(da_const["nens"]):
+        if da_const["fixed_seed"]==True: np.random.seed(i+j*da_const["nens"])
+        u_ens      = np.random.normal(m_const["u_ref"],da_const["u_std_ens"])
+        dhdt_ens   = np.random.normal(m_const["dhdt_ref"],da_const["dhdt_std_ens"])
+        bf[:,i]    = linear_advection_model(state[j]['bg'][t][:,i],u_ens,dhdt_ens,m_const["dx"],da_const["dt"],da_const["nt"])
+        fc[:,i]    = linear_advection_model(an[:,i],u_ens,dhdt_ens,m_const["dx"],da_const["dt"],da_const["nt"])
+
+    
+    """
+    create dictionary to store this single step 
+    """
+    quad_state = {}
+    quad_state['bg'] = state[j]['bg'][t]
+    quad_state['an'] = an
+    quad_state['bf'] = bf
+    quad_state['fc'] = fc
+    quad_state['tr_fc'] = truth_forecast
+    quad_state['tr_bg'] = state[j]['truth'][t]
+    quad_state['obs'] = obs
+    return quad_state
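+
+# Usage sketch (illustrative; the j and t values here are arbitrary):
+#   quad = single_step_analysis_forecast(state, da_const, m_const, j=0, t=5)
+#   quad['bg'], quad['an'], quad['bf'], quad['fc']   # background, analysis, blind forecast, forecast ensembles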
 
 def LETKF_analysis(bg,obs,m_const,da_const):
     """
diff --git a/plot_functions.py b/plot_functions.py
index b0855c8..72bdc57 100644
--- a/plot_functions.py
+++ b/plot_functions.py
@@ -64,6 +64,62 @@ def ensemble_plotter(states,m_const,da_const,j=0,t_start=0,t_end=0):
     return fig,ax
 
 
+def quad_plotter(quad_state,m_const,da_const):
+    """
+    Plots the initial background and blind forecast, as well as the analysis and forecast with observations 
+ 
+    Input: 
+    j : experiment number
+    t_start and t_end: time frame to plot
+
+    Returns: 
+    figure and axes
+    """
+    sns.set()
+    sns.set_style('whitegrid')
+
+
+    alpha = np.sqrt(1/da_const['nens'])
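+    # member lines get more transparent as the ensemble size grows, keeping overlapping members readable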
+    
+    fig, ax = plt.subplots(2,2,figsize=(10,5),sharex='all',sharey='all')
+
+    for i in range(da_const["nens"]):
+        ax[0,0].plot(m_const['x_grid'],quad_state['bg'][:,i],'r',alpha =alpha,zorder=1)
+        ax[0,1].plot(m_const['x_grid'],quad_state['bf'][:,i],'magenta',alpha =alpha,zorder=1)
+        ax[1,0].plot(m_const['x_grid'],quad_state['an'][:,i],'b',alpha =alpha,zorder=1)
+        ax[1,1].plot(m_const['x_grid'],quad_state['fc'][:,i],'c',alpha =alpha,zorder=1)
+    
+
+    ax[0,0].plot(m_const["x_grid"],quad_state['tr_bg'],'k',zorder=10,label='truth')
+    ax[1,0].plot(m_const["x_grid"],quad_state['tr_bg'],'k',zorder=10,label='truth')
+    ax[0,1].plot(m_const["x_grid"],quad_state['tr_fc'],'k',zorder=10,label='truth')
+    ax[1,1].plot(m_const["x_grid"],quad_state['tr_fc'],'k',zorder=10,label='truth')
+    
+    ax[0,0].plot(m_const['x_grid'],np.mean(quad_state['bg'][:,:],axis=1),'k--',alpha =1,zorder=2,label='ens mean')
+    ax[1,0].plot(m_const['x_grid'],np.mean(quad_state['an'][:,:],axis=1),'k--',alpha =1,zorder=2,label='ens mean')
+    ax[0,1].plot(m_const['x_grid'],np.mean(quad_state['bf'][:,:],axis=1),'k--',alpha =1,zorder=2,label='ens mean')
+    ax[1,1].plot(m_const['x_grid'],np.mean(quad_state['fc'][:,:],axis=1),'k--',alpha =1,zorder=2,label='ens mean')
+
+    ax[0,0].scatter(m_const["x_grid"][da_const["obs_loc"]],quad_state['obs'][da_const["obs_loc"]],zorder=3,label='observations',color='k')
+    ax[1,0].scatter(m_const["x_grid"][da_const["obs_loc"]],quad_state['obs'][da_const["obs_loc"]],zorder=3,label='observations',color='k')
+        
+    
+    ax[0,0].set_title('background with obs')
+    ax[0,1].set_title('free forecast')
+    ax[1,0].set_title('analysis with obs')
+    ax[1,1].set_title('forecast')
+    ax[0,1].set_xlim([m_const["x_grid"][0],m_const["x_grid"][-1]])
+    plt.subplots_adjust(wspace=0.03,hspace=0.30)
+    ax[1,1].set_xlabel('x [m]')
+    ax[1,0].set_xlabel('x [m]')
+    ax[0,0].set_ylabel('h [m]')
+    ax[1,0].set_ylabel('h [m]')
+    plt.legend()
+    return fig,ax
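+
+# Usage sketch (illustrative; assumes a quad_state produced by single_step_analysis_forecast in da_functions.py):
+#   fig, ax = quad_plotter(quad_state, m_const, da_const)
+#   fig.savefig('quad_step.png')   # hypothetical output filename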
+
+
 def B_plotter(states,j=0,ncyc=0,matrix='bg'):
     """
     Plots the covariance plotter of either the forecast/background ensemble ('bg'), analysis 'an', or the free/blind forecast 'bf'
-- 
GitLab