diff --git a/da_functions.py b/da_functions.py
index 9194800c5e02dbb10bc28d0a2b43a3820ebd5e61..1e14c26aa3e8519b73bf5466409b818f4afb65b9 100644
--- a/da_functions.py
+++ b/da_functions.py
@@ -4,6 +4,93 @@
 
 import numpy as np
 from model_functions import *
+from misc_functions import *
+
+
+
+def set_da_constants_22(ncyc=100,nt=1,dt=550,u_std=2.0,dhdt_std=0.,
+                     True_std_obs=0.15,used_std_obs=0.15,pert_std_obs=0.15,
+                     obs_loc=np.arange(49,299,100).astype(int),
+                     True_std_obs_sat=0.25,used_std_obs_sat=0.25,pert_std_obs_sat=0.25,
+                     obs_loc_sat=np.arange(7,299,15),
+                     nens=32,nexp=1,
+                     init_noise=0.,init_spread = False, init_spread_h=0.5,init_spread_x = 500.,
+                     ens_seed=0, obs_seed=0, fixed_seed = False,
+                     loc=True,loc_length = 3200,loc_type='gaspari_cohn',
+                     method='sqEnKF'):
+    """
+    Sets constants needed for data assimilation and stores them in a dictionary.
+    Is now expanded to also use "satellite" observations, so that all observation statements are doubled, once for the default h point measurements, and once for the sat measurements.
+    
+    There is some confusting misnaming going on, e.g. "u_std_ens" = u_std, but nothing that needs to be changed immediately
+
+    nt is currently really dangerous because of how the u perubation of the linear advection modell are rerolled, please leave at 1 for now. 
+    
+    
+    
+    TODO:
+     
+    """
+    DA_const = {}
+    
+     
+    DA_const["ncyc"] = ncyc                   # Number of assimilation cycles
+    DA_const["nens"] = nens                   # number of ensemble members
+    DA_const["nexp"] = nexp                   # number of parallel experiments to run
+    
+    
+    DA_const["nt"] = nt                       # Number of model timesteps between observations
+    DA_const["dt"] = dt                       # time duration of model timesteps 
+    
+    #Assimilation Method
+    DA_const['method']       = method         # Options so far include EnKF, sqEnKF, and LETKF, each with and without localization.
+    
+    #Ensemble Errors and ensemble  
+    DA_const["u_std_ens"]    = u_std          # Standard deviation of model u 
+    DA_const["dhdt_std_ens"] = dhdt_std       # Standard deviation of model dydt 
+    DA_const["ens_seed"]   = ens_seed     # Is added to the seed when generating ensemble paramter deviations 
+    DA_const["fixed_seed"]   = fixed_seed     # If True then the ensemble errors will always be constant 
+    
+    DA_const["True_std_obs_sat"] = True_std_obs_sat   # Standard deviation of true observation error of y 
+    DA_const["used_std_obs_sat"] = used_std_obs_sat   # Standard deviation of assumed observation error used to calculate R
+    DA_const["pert_std_obs_sat"] = pert_std_obs_sat   # Standard deviation of pertubations added to the true observation for each ensemble member individually when updating
+    
+    #Ensemble initialization
+    DA_const["init_noise"]    = init_noise    # spread of noise added to initial conditions to avoid singular matrices
+    DA_const["init_spread"]   = init_spread   # If True, adds an x and h displacement to initial ensemble spread 
+    DA_const["init_spread_h"] = init_spread_h # initial spread of ensemble in h 
+    DA_const["init_spread_x"] = init_spread_x # initial spread of ensemble in x
+    
+    #Point measurement observations
+    DA_const["True_std_obs"] = True_std_obs   # Standard deviation of the true observation error of the point measurements
+    DA_const["used_std_obs"] = used_std_obs   # Standard deviation of the assumed observation error used to calculate R
+    DA_const["pert_std_obs"] = pert_std_obs   # Standard deviation of the perturbations added to the true observation for each ensemble member individually when updating
+    DA_const["obs_loc"] = obs_loc             # Index array of which state elements are observed
+    
+    # Satellite observations
+    DA_const["True_std_obs_sat"] = True_std_obs_sat   # Standard deviation of the true observation error of the satellite measurements
+    DA_const["used_std_obs_sat"] = used_std_obs_sat   # Standard deviation of the assumed observation error used to calculate R
+    DA_const["pert_std_obs_sat"] = pert_std_obs_sat   # Standard deviation of the perturbations added to the true observation for each ensemble member individually when updating
+    DA_const["obs_loc_sat"] = obs_loc_sat             # Index array of which state elements are observed by the satellite
+    
+    DA_const["obs_seed"]   = obs_seed     # Is added to the seed when generating observations 
+    
+    #Localization
+    DA_const["loc"]        = loc              # Whether localization is applied
+    DA_const["loc_length"] = loc_length       # Localization length scale
+    DA_const["loc_type"]   = loc_type         # Localization function, e.g. 'gaspari_cohn'
+    
+    #Observation error covariance matrix
+    n_obs_h   = len(DA_const["obs_loc"])
+    n_obs_sat = len(DA_const["obs_loc_sat"])
+    n_obs = n_obs_h + n_obs_sat
+    r = np.ones(n_obs)
+    r[:n_obs_h] = DA_const["used_std_obs"]**2.
+    r[n_obs_h:] = DA_const["used_std_obs_sat"]**2.
+    DA_const['R'] = np.diag(r)
+    DA_const['n_obs_h']   = n_obs_h
+    DA_const['n_obs_sat'] = n_obs_sat
+    return DA_const
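+
+
+# Usage sketch (an illustration, not part of the assimilation pipeline): build the
+# constants dictionary and check the stacked R matrix it derives.
+#
+#     da_const = set_da_constants_22(ncyc=50, nens=16, method='LETKF')
+#     n_obs = da_const['n_obs_h'] + da_const['n_obs_sat']
+#     assert da_const['R'].shape == (n_obs, n_obs)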
 
 def set_da_constants(ncyc=10,nt=1,dt=500,u_std=0.5,dhdt_std=1e-4,True_std_obs=0.1,used_std_obs=0.1,pert_std_obs=0.1,obs_loc_h=np.arange(5,101,15),nens=20,nexp=1,init_noise=0.,fixed_seed=True,
                      loc=None,init_spread = False, init_spread_h=0.5,init_spread_x = 500.,
@@ -140,6 +227,7 @@ def create_states_dict(j,states,m_const,da_const):
     
     return DA_const
 
+
 def generate_obs(truth,states,m_const,da_const,j,t):
     """
     Generates the truth and observations for the next assimilation step.
@@ -168,6 +256,42 @@ def generate_obs(truth,states,m_const,da_const,j,t):
 
 
 
+def generate_obs_22(truth,truth_init,m_const,da_const,j,t,sat_operator):
+    """
+    Generates the truth and observations for the next assimilation step to be used when running the model forward with  assimilation steps.
+    In contrast to the old generate_obs this now includes the possibility to generate sat_obs.
+    
+    Another new difference is that the new da_const has seeds for the obs and the ensembles, so that we can set them independantly without messing around with flags
+    
+    To avoid diffusion over time in the truth I take advantage of the linearity of the model to compute it directly from the initial conditions. 
+    This only works if the truth is not perturbed each timestep though.  
+    
+    Should replace the standard function at some point
+    """
+    #Generate new truth constants and integrate in time
+    if m_const["u_std_truth"]+m_const["dhdt_std_truth"]==0:
+        #t starts at zero, that is why we need a plus 1 
+        truth = linear_advection_model(truth_init,m_const["u_ref"],m_const["u_std_truth"],m_const["dx"],da_const["dt"]*(t+1),da_const["nt"])
+
+    else:
+        np.random.seed((j+1)*(t+1)+1000+m_const['truth_seed'])
+        u_truth = np.random.normal(m_const["u_ref"],m_const["u_std_truth"])
+        dhdt_truth = np.random.normal(m_const["dhdt_ref"],m_const["dhdt_std_truth"])
+        truth = linear_advection_model(truth,u_truth,dhdt_truth,m_const["dx"],da_const["dt"],da_const["nt"])
+    
+    #Make obs by adding some noise; the seed changes over time so that the observation errors are not the same at each measurement location every cycle
+    np.random.seed((j+1)*(t+1)+10000+da_const['obs_seed'])
+    obs = truth + np.random.normal(0,da_const["True_std_obs"],m_const["nx"])
+    if len(da_const['obs_loc_sat'])>0:
+        truth_sat = sat_operator(truth)
+        obs_sat = truth_sat + np.random.normal(0,da_const["True_std_obs_sat"],m_const["nx"]) 
+    else:
+        obs_sat = np.zeros(m_const['nx'])
+        
+    return truth, obs, obs_sat
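+
+# Note on the diffusion-free shortcut above (sketch, using placeholder names): because
+# the model is linear, one advection step over dt*(t+1) is equivalent to t+1 successive
+# steps over dt, e.g.
+#
+#     linear_advection_model(truth_init, u_ref, 0., dx, dt*(t+1), nt)
+#
+# yields the same truth as stepping forward t+1 times, but without accumulating
+# interpolation diffusion from repeated steps.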
+
+
+
 
 
 def predict(analysis,states,da_const,m_const,j):
@@ -187,6 +311,23 @@ def predict(analysis,states,da_const,m_const,j):
     states["bg"] = states["bg"]+[bg]
     return bg, states
 
+def predict_LA_22(analysis,states,da_const,m_const,j,t):
+    """
+    Runs the analysis ensemble forward in time using the model to generate the forecast/background ensemble for the next assimilation step.
+    """
+    bg = np.zeros((m_const["nx"],da_const["nens"]))
+    for i in range(da_const["nens"]):
+        if da_const["fixed_seed"]==True: 
+            np.random.seed(i+j*da_const["nens"]+da_const['ens_seed'])
+        else: 
+            np.random.seed(i+j*da_const["nens"]+(da_const['ens_seed']+1)*t)
+        u_ens      = np.random.normal(m_const["u_ref"],da_const["u_std_ens"])
+        dhdt_ens   = np.random.normal(m_const["dhdt_ref"],da_const["dhdt_std_ens"])
+        bg[:,i]    = linear_advection_model(analysis[:,i],u_ens,dhdt_ens,m_const["dx"],da_const["dt"],da_const["nt"])
+        
+    states["bg"] = states["bg"]+[bg]
+    return bg, states
+
 
 
 
@@ -329,6 +470,7 @@ def run_linear_advection_KF(m_const,da_const):
             Update
             """
             ## Combine background and observations to get the analysis
+            np.random.seed(t)
             if da_const['method'] == 'EnKF':
                 analysis, states[j] = update(background,obs,R,H,C,states[j],da_const,m_const)
             if da_const['method'] == 'LETKF':
@@ -343,6 +485,147 @@ def run_linear_advection_KF(m_const,da_const):
     return states
 
 
+def run_linear_advection_KF_22(m_const,da_const,sat_operator):
+    """
+    The heart and soul of the whole linear advection EnKF filter, now updated in the 22 version to work with satellite data and the seeds.
+
+    Todo: Improve the documentation
+    """
+
+    """
+    constant matrices that follow from previously defined constants
+    """
+    # H should no longer be needed here; the mapping to observation space happens in state_to_observation_space
+    R = da_const["R"]         # Observation error covariance matrix
+
+
+
+
+    """
+    create dictionary to store background, analysis, truth and observations for verification 
+    """
+    states = {}
+
+    #Construct localization matrix C if loc is True
+    if da_const["loc"]:
+        C = loc_matrix(da_const,m_const)
+    else:
+        C=np.ones([m_const["nx"],m_const["nx"]])
+
+    """
+    Loop over experiments
+    """
+    for j in range(da_const["nexp"]):
+
+        """
+        Initialize a random truth and a random analysis ensemble stored in "states"
+        """
+        analysis, truth, states = create_states_dict_22(j,states,m_const,da_const,sat_operator)
+        
+
+        """
+        START DATA ASSIMILATION CYCLE
+        """
+        for t in range(0,da_const["ncyc"]):
+            """
+            Run the truth forward in time until the next assimilation step and create observations. 
+            Note that this step is usually provided by nature and measurements obtained from weather stations, 
+            wind profilers, radiosondes, aircraft reports, radars, satellites, etc.
+            """
+            truth, obs, obs_sat = generate_obs_22(truth,states[j]['truth'][0],m_const,da_const,j,t,sat_operator)
+            states[j]["truth"]   = states[j]["truth"]  +[truth]
+            states[j]["obs"]     = states[j]["obs"]    +[obs]
+            states[j]["obs_sat"] = states[j]["obs_sat"]+[obs_sat]
+
+            """
+            Predict
+            """
+            # Predict the state at the next assimilation step by running the analysis forward in time
+            background, states[j] = predict_LA_22(analysis,states[j],da_const,m_const,j,t)
+
+            """
+            Update
+            """
+            ## Combine background and observations to get the analysis
+            np.random.seed(t)
+            if da_const['method'] == 'EnKF':
+                np.random.seed((j+3)**2+(t+4)**3)
+                analysis = ENKF_analysis_22(background,obs,obs_sat,da_const,m_const,sat_operator)
+                states[j]["an"] = states[j]["an"]+[analysis]
+            
+            if da_const['method'] == 'LETKF':
+                analysis,bla = LETKF_analysis_22(background,obs,obs_sat,m_const,da_const,sat_operator)
+                states[j]["an"] = states[j]["an"]+[analysis]
+            
+            if da_const['method'] == 'sqEnKF':
+                analysis = sqEnKF_analysis_22(background,obs,obs_sat,da_const,m_const,sat_operator)
+                states[j]["an"] = states[j]["an"]+[analysis]
+        """
+        END DATA ASSIMILATION CYCLE
+        """
+    """
+    end loop over experiments
+    """
+    return states
+
+
+def sqEnKF_analysis_22(background,obs,obs_sat,da_const,m_const,sat_operator):
+    """
+    Computes the analysis using the square root Kalman gain introduced by Whitaker and Hamill (2002). It uses the modified Kalman gain to compute the change in deviations,
+    and the normal Kalman gain to compute the change in the mean.
+    """
+    #Step 1, preparing all the basics
+    an = np.zeros((m_const["nx"],da_const["nens"]))
+    bg_obs_deviations,bg_obs_ol = state_to_observation_space(background,m_const,da_const,sat_operator)
+    bg_obs = (bg_obs_deviations.T+bg_obs_ol).T
+    bg_ol = np.mean(background,axis=1)
+    if da_const['n_obs_sat']* da_const['n_obs_h']>0:
+        obs_stack = np.hstack([obs[da_const['obs_loc']],obs_sat[da_const['obs_loc_sat']]])
+    else:
+        if da_const['n_obs_sat']>0: 
+            obs_stack = obs_sat[da_const['obs_loc_sat']]
+        if da_const['n_obs_h']>0:   
+            obs_stack = obs[da_const['obs_loc']]
+
+    dbg= (background.T-bg_ol).T
+    # calculating the analysis ensemble mean using the normal Kalman gain 
+    K = Kalman_gain_observation_deviations(background,m_const,da_const,sat_operator)
+    an_ol = bg_ol + np.dot(K,obs_stack-bg_obs_ol)
+
+    # Calculating the change in ensemble deviations using the square root Kalman gain
+    sqK = square_root_Kalman_gain_observation_deviations(background,m_const,da_const,sat_operator)
+    
+    dan = dbg -np.dot(sqK,bg_obs_deviations)
+    
+    an = (dan.T + an_ol).T
+    return an
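+
+# The update implemented above, in the notation of Whitaker and Hamill (2002) and
+# the variable names of this function:
+#     mean:       an_ol = bg_ol + K  (obs_stack - bg_obs_ol)
+#     deviations: dan   = dbg   - sqK bg_obs_deviations
+# where K is the standard Kalman gain and sqK the square root ("modified") gain;
+# both are built from observation-space deviations (YY^T for HBH^T, XY^T for BH^T),
+# so no linearized satellite operator is required.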
+
+
+def ENKF_analysis_22(background,obs,obs_sat,da_const,m_const,sat_operator):
+    """
+    Computes the analysis by individually updating each ensemble member of the forecast/background using the shared ensemble Kalman gain and perturbed observations.
+    """
+    # Define the relative weights of observations and background in terms of the Kalman gain matrix K
+    K = Kalman_gain_observation_deviations(background,m_const,da_const,sat_operator)
+
+    # Compute the analysis for each ensemble member
+    an = np.zeros((m_const["nx"],da_const["nens"]))
+    bg_obs_deviations,mean = state_to_observation_space(background,m_const,da_const,sat_operator)
+    bg_obs = (bg_obs_deviations.T+mean).T
+    for i in range(da_const["nens"]):
+        obs_pert_h   = obs[da_const['obs_loc']]        +np.random.normal(0,da_const["pert_std_obs"]    ,len(da_const['obs_loc'])) 
+        obs_pert_sat = obs_sat[da_const['obs_loc_sat']]+np.random.normal(0,da_const["pert_std_obs_sat"],len(da_const['obs_loc_sat'])) 
+        obs_pert = np.hstack([obs_pert_h,obs_pert_sat])
+        an[:,i] =  background[:,i] + np.dot(K,obs_pert-bg_obs[:,i])    
+    return an
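+
+# Per-member update implemented above (classic perturbed-observations EnKF):
+#     an_i = bg_i + K (y_o + eps_i - h(bg_i)),   eps_i ~ N(0, pert_std_obs^2)
+# Perturbing each member's observations keeps the analysis ensemble spread
+# statistically consistent with the observation error R.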
+
+
+
 def get_spread_and_rmse(states,da_const,m_const):
     """
     computes RMSE over space and time respectively, averaged over all experiments for the background ensmeble mean and analysis ensemble mean.
@@ -474,6 +757,44 @@ def sum_mid_tri(x):
     idx_end = int(2*nx/3.)
     return np.sum(x[idx_str:idx_end])
 
+def sum_mid_tri_abs(x):
+    """
+    Sum of the absolute values over the middle third of the state
+    """
+    nx = len(x)
+    idx_str = int(nx/3.)
+    idx_end = int(2*nx/3.)
+    x_mid = x[idx_str:idx_end]
+    return np.sum(np.abs(x_mid))
+
+
+def mid_ma(x):
+    """
+    Mean of the absolute values over the middle third of the state
+    """
+    nx = len(x)
+    idx_str = int(nx/3.)
+    idx_end = int(2*nx/3.)
+    x_mid = x[idx_str:idx_end]
+    return np.sum(np.abs(x_mid))/float(len(x_mid))
+
+def mid_rms(x):
+    """
+    Root mean square over the middle third of the state
+    """
+    nx = len(x)
+    idx_str = int(nx/3.)
+    idx_end = int(2*nx/3.)
+    x_mid = x[idx_str:idx_end]
+    ms = np.sum(np.power(x_mid,2.))/float(len(x_mid))
+    return np.sqrt(ms)
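+
+# Quick sanity check (sketch): for a constant field, the mid-domain RMS is just the
+# magnitude of that constant, since only indices nx/3..2nx/3 enter the sum.
+#
+#     mid_rms(np.full(90, 2.0))   # -> 2.0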
+
+def randomized_obs_loc(n_obs,start=0,end=100,seed=0):
+    """randomly assignes n_obs within the window given""" 
+    np.random.seed(seed)
+    randomized_obs_loc = np.random.choice(np.arange(start,end), n_obs, replace=False)
+    randomized_obs_loc.sort()
+    return randomized_obs_loc
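+
+# Example (sketch): draw five unique, sorted observation indices in [0, 100);
+# identical seeds reproduce identical locations.
+#
+#     obs_loc = randomized_obs_loc(5, start=0, end=100, seed=1)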
 
 def add_response(states,func_J=sum_mid_tri):
     """
@@ -710,9 +1031,9 @@ def var_reduction_estimate_iterative(states,m_const,da_const,j=0,dJ_update_flag=
                 dxJ = dxJ - alpha*np.outer(K,HdxJ)
 
                 dx = dxJ[:-1,:]
-                old_var_dJ = np.var(dJ)
+                old_var_dJ = np.var(dJ,ddof=1)
                 dJ = dxJ[-1,:]
-                new_var_dJ = np.var(dJ)
+                new_var_dJ = np.var(dJ,ddof=1)
 
                 if dJ_update_flag==1:
                     var_scaling = (old_var_dJ+ vr_ind_tmp[ind_min])/new_var_dJ
@@ -722,7 +1043,7 @@ def var_reduction_estimate_iterative(states,m_const,da_const,j=0,dJ_update_flag=
             if dJ_update_flag==2 or dJ_update_flag==3:
                 
                 if dJ_update_flag==2:
-                    var_scaling=(np.var(dJ)+vr_ind_tmp[ind_min])/np.var(dJ)
+                    var_scaling=(np.var(dJ,ddof=1)+vr_ind_tmp[ind_min])/np.var(dJ,ddof=1)
                     #New dJ
                     dJ=np.sqrt(var_scaling)*dJ
 
@@ -788,93 +1109,101 @@ def gaspari_cohn_non_mirrored(x,loc_length):
         
     return gp 
     
-def single_step_analysis_forecast(state,da_const,m_const,j,t):
-    """
-    This function takes a given background state, to which is computes an analysis for provided observations. A blind forecast is computed from the background, and a forecast is computed from the analysis. 
+# def single_step_analysis_forecast(state,da_const,m_const,j,t,seed_add=1):
+#     """
+#     This function takes a given background state, to which is computes an analysis for provided observations. A blind forecast is computed from the background, and a forecast is computed from the analysis. 
     
-    The main purpose of this approach of having a little isolated timestep is to enable applying different test to the same backgound state. 
+#     The main purpose of this approach of having a little isolated timestep is to enable applying different test to the same backgound state. 
 
-    """
+#     """
     
-    """
-    constant matrices that follow from previously defined constants
-    """
-    H = np.identity(m_const["nx"])[da_const["obs_loc"],:]                              # Observation operator    
-    R = da_const["used_std_obs"]**2*np.identity(len(da_const["obs_loc"]))           # Observation error corvariance matrix
+#     """
+#     constant matrices that follow from previously defined constants
+#     """
+#     H = np.identity(m_const["nx"])[da_const["obs_loc"],:]                              # Observation operator    
+#     R = da_const["used_std_obs"]**2*np.identity(len(da_const["obs_loc"]))           # Observation error corvariance matrix
     
 
-    #Construct localization matrix C if loc!=None
-    if da_const["loc"]:
-        C = loc_matrix(da_const,m_const)
-    else:
-        C=np.ones([m_const["nx"],m_const["nx"]])
-
-    """
-    Generate obs
-    """
-    #make obs by adding some noise, a fixed seed should change over time so that the differences are not always the same for each measurement location
-    if da_const["fixed_seed"]==True: np.random.seed((j+1)*(t+1))
-    
-    #changing obs to only exist where observations are to enable having multiple observations at the same space
-    #obs = np.zeros(len(da_const['obs_loc']))
-    #for o in range(len(da_const['obs_loc'])):
-    #    obs[o] = state[j]['truth'][t][0] + np.random.normal(0,da_const["True_std_obs"])
-    
-    obs = state[j]['truth'][t] + np.random.normal(0,da_const["True_std_obs"],m_const["nx"])
-    
-    #Generate new truth constants and integrate in time
-    if da_const["fixed_seed"]==True: np.random.seed(j)
-    u_truth = np.random.normal(m_const["u_ref"],m_const["u_std_truth"])
-    dhdt_truth = np.random.normal(m_const["dhdt_ref"],m_const["dhdt_std_truth"])
-    truth_forecast = linear_advection_model(state[j]['truth'][t],u_truth,dhdt_truth,m_const["dx"],da_const["dt"],da_const["nt"])
-    
+#     #Construct localization matrix C if loc!=None
+#     if da_const["loc"]:
+#         C = loc_matrix(da_const,m_const)
+#     else:
+#         C=np.ones([m_const["nx"],m_const["nx"]])
 
-    """
-    EnKF
-    """
-    if da_const['method'] == 'EnKF':
-        # Compute the background error covariance matrix
-        P = np.cov(state[j]['bg'][t])*C
+#     """
+#     Generate obs
+#     """
+#     #make obs by adding some noise, a fixed seed should change over time so that the differences are not always the same for each measurement location
+#     #if da_const["fixed_seed"]==True: 
+#     #Changed to the following
+#     np.random.seed((j+1)*(t+1)*(seed_add+1))
     
-        # define relative weights of observations and background in terms of the Kalman Gain matrix of size
-        K = KalmanGain(P, R, H)
     
-        # Compute the analysis for each ensemble members
-        an = np.zeros((m_const["nx"],da_const["nens"]))
-        for i in range(da_const["nens"]):
-            an[:,i] = get_analysis(state[j]['bg'][t][:,i],obs,K,H,da_const)
-    """
-    LETKF
-    """
-    if da_const['method'] == 'LETKF':
-        an,bla = LETKF_analysis(state[j]['bg'][t],state[j]['obs'][t],m_const,da_const)   
+#     #changing obs to only exist where observations are to enable having multiple observations at the same space
+#     #obs = np.zeros(len(da_const['obs_loc']))
+#     #for o in range(len(da_const['obs_loc'])):
+#     #    obs[o] = state[j]['truth'][t][0] + np.random.normal(0,da_const["True_std_obs"])
     
-
-    """
-    Predict blind forecast and forecast
-    """
-    bf = np.zeros((m_const["nx"],da_const["nens"]))
-    fc = np.zeros((m_const["nx"],da_const["nens"]))
-    for i in range(da_const["nens"]):
-        if da_const["fixed_seed"]==True: np.random.seed(i+j*da_const["nens"])
-        u_ens      = np.random.normal(m_const["u_ref"],da_const["u_std_ens"])
-        dhdt_ens   = np.random.normal(m_const["dhdt_ref"],da_const["dhdt_std_ens"])
-        bf[:,i]    = linear_advection_model(state[j]['bg'][t][:,i],u_ens,dhdt_ens,m_const["dx"],da_const["dt"],da_const["nt"])
-        fc[:,i]    = linear_advection_model(an[:,i],u_ens,dhdt_ens,m_const["dx"],da_const["dt"],da_const["nt"])
-
+#     obs = state[j]['truth'][t] + np.random.normal(0,da_const["True_std_obs"],m_const["nx"])
     
-    """
-    create dictionary to store this single step 
-    """
-    quad_state = {}
-    quad_state['bg'] = state[j]['bg'][t]
-    quad_state['an'] = an
-    quad_state['bf'] = bf
-    quad_state['fc'] = fc
-    quad_state['tr_fc'] = truth_forecast
-    quad_state['tr_bg'] = state[j]['truth'][t]
-    quad_state['obs'] = obs
-    return quad_state
+#     #Generate new truth constants and integrate in time
+#     if da_const["fixed_seed"]==True: 
+#         np.random.seed(j)
+#     else:
+#         np.random.seed((j+1)*(t+1)*(seed_add+1))
+        
+        
+#     u_truth = np.random.normal(m_const["u_ref"],m_const["u_std_truth"])
+#     dhdt_truth = np.random.normal(m_const["dhdt_ref"],m_const["dhdt_std_truth"])
+#     truth_forecast = linear_advection_model(state[j]['truth'][t],u_truth,dhdt_truth,m_const["dx"],da_const["dt"],da_const["nt"])
+    
+
+#     """
+#     EnKF
+#     """
+#     if da_const['method'] == 'EnKF':
+#         # Compute the background error covariance matrix
+#         P = np.cov(state[j]['bg'][t])*C
+    
+#         # define relative weights of observations and background in terms of the Kalman Gain matrix of size
+#         K = KalmanGain(P, R, H)
+    
+#         # Compute the analysis for each ensemble members
+#         an = np.zeros((m_const["nx"],da_const["nens"]))
+#         for i in range(da_const["nens"]):
+#             an[:,i] = get_analysis(state[j]['bg'][t][:,i],obs,K,H,da_const)
+#     """
+#     LETKF
+#     """
+#     if da_const['method'] == 'LETKF':
+#         an,bla = LETKF_analysis(state[j]['bg'][t],state[j]['obs'][t],m_const,da_const)   
+    
+
+#     """
+#     Predict blind forecast and forecast
+#     """
+#     bf = np.zeros((m_const["nx"],da_const["nens"]))
+#     fc = np.zeros((m_const["nx"],da_const["nens"]))
+#     for i in range(da_const["nens"]):
+#         if da_const["fixed_seed"]==True: np.random.seed(i+j*da_const["nens"])
+#         u_ens      = np.random.normal(m_const["u_ref"],da_const["u_std_ens"])
+#         dhdt_ens   = np.random.normal(m_const["dhdt_ref"],da_const["dhdt_std_ens"])
+#         bf[:,i]    = linear_advection_model(state[j]['bg'][t][:,i],u_ens,dhdt_ens,m_const["dx"],da_const["dt"],da_const["nt"])
+#         fc[:,i]    = linear_advection_model(an[:,i],u_ens,dhdt_ens,m_const["dx"],da_const["dt"],da_const["nt"])
+
+    
+#     """
+#     create dictionary to store this single step 
+#     """
+#     quad_state = {}
+#     quad_state['bg'] = state[j]['bg'][t]
+#     quad_state['an'] = an
+#     quad_state['bf'] = bf
+#     quad_state['fc'] = fc
+#     quad_state['tr_fc'] = truth_forecast
+#     quad_state['tr_bg'] = state[j]['truth'][t]
+#     quad_state['obs'] = obs
+#     return quad_state
 
 def LETKF_analysis(bg,obs,m_const,da_const):
     """
@@ -1066,7 +1395,9 @@ def single_step_analysis_forecast_v2(background,truth,da_const,m_const,model_see
     
     #Generate new truth constants and integrate in time
     if m_const['model']=='LA':
-        if da_const["fixed_seed"]==True: np.random.seed(model_seed)
+        if da_const["fixed_seed"]==True and model_seed==0: np.random.seed(model_seed)
+        if da_const["fixed_seed"]==False and model_seed!=0 : np.random.seed(model_seed)
+            
         u_truth = np.random.normal(m_const["u_ref"],m_const["u_std_truth"])
         dhdt_truth = np.random.normal(m_const["dhdt_ref"],m_const["dhdt_std_truth"])
         truth_forecast = linear_advection_model(truth,u_truth,dhdt_truth,m_const["dx"],da_const["dt"],da_const["nt"])
@@ -1140,7 +1471,7 @@ def single_step_analysis_forecast_v2(background,truth,da_const,m_const,model_see
 def vr_reloaded(background,truth,m_const,da_const,func_J=sum_mid_tri,
                 reduc = 1,reg_flag=1,
                 quad_state = None,dJdx_inv=None,alpha=None,mismatch_threshold=0.1,
-                iterative_flag=1,explicit_sens_flag = 1,exp_num=0):
+                iterative_flag=0,explicit_sens_flag = 1,exp_num=0,obs_seed=0,model_seed=0):
 
     """
     New version takes only the background and truth, then caculates the quad, finaly calculates the response function and the variance reduction. 
@@ -1179,7 +1510,7 @@ def vr_reloaded(background,truth,m_const,da_const,func_J=sum_mid_tri,
     #A precomputed quad_state can be supplied to quickly use different response functions
     ###########################################################################
     if type(quad_state)== type(None):
-        quad_state = single_step_analysis_forecast_v2(background,truth,da_const,m_const)
+        quad_state = single_step_analysis_forecast_v2(background,truth,da_const,m_const,obs_seed=obs_seed,model_seed=model_seed)
     
     ###########################################################################
     # Next we need the response functions. 
@@ -1436,8 +1767,1052 @@ def vr_reloaded(background,truth,m_const,da_const,func_J=sum_mid_tri,
     
     J_fc= fc_response
     dJ_fc = J_fc-np.mean(J_fc)
-    real_reduction=np.var(dJ_fc) - np.var(dJ_orig)
+    real_reduction=np.var(dJ_fc,ddof=1) - np.var(dJ_orig,ddof=1)
         
     return vr_total,vr_individual,real_reduction,J_dict,dJdx_inv,quad_state,dx
 
 
+def vr_individual_loc(background,truth,m_const,da_const,response_func=1,abs_flag=0,
+                quad_state = None,exp_num=0,
+                advect_flag = 0,obs_seed=0,random_samples=0,seed_samples=0,error_u=100,error_u_ens=100):
+
+    """
+    Based on vr_reloaded, but adapted to individually localize the covariances of the response functions at each point.
+    
+    important parameters:
+    response_func: controls the response, 1 results in the default sum over the middle third, 2 in the mean over the middle third
+    abs_flag: determines if the absolute value of the state vector is used for the response function.
+    
+    If advect_flag==1, the localization matrix is advected using the m_const advection speed, which should be close to the ensemble mean. The same advection function is used as in the linear advection model.
+    If advect_flag==2, the localization matrix is advected using the individual ensemble members, so that in the end the localization vector is broadened according to ensemble dispersion.
+     
+    random_samples=0,seed_samples=0,error_u=100,error_u_ens=100: these are all various ways of adding an error to the advection term.
+    
+ 
+    For now only using the all at once method, and it probably won't work for only a single observation.   
+    
+    The equations should be in the paper draft, but I made quite the mistake by thinking that the A at the end of A'-A = -KHA is also localized.
+    
+    Possible speed ups not used so far:
+    * I could speed this up a bit by only calculating the localized correlations where they will be needed (so where H says they should be), but that is a bit of work to do well so for now I will leave it out.  
+    * Given that we use Gaspari-Cohn localization most of the time, I might be able to save some time by only calculating covariances where C_i is not zero. But probably won't make a difference for toy model
+    * We currently assign J_i a value at every grid point, even though we skip calculations where response_s = 0. It would be more memory efficient to allocate J_i in a restricted area, but again I think this won't matter in a toy model
+    * Possibly the biggest speed up would be to avoid the loop when calculating the response functions, which I haven't bothered with yet. 
+    
+
+
+    """
+    ##########################################################################
+    #We begin by computing the vecors which determine the response function
+    ###########################################################################
+    response_s = np.zeros(m_const['nx'])
+    response_c = np.ones(m_const['nx'])
+    nx = m_const['nx']
+    if response_func ==1: 
+        idx_str = int(nx/3.)
+        idx_end = int(2*nx/3.)
+        response_s[idx_str:idx_end] = 1
+    if response_func ==2: 
+        idx_str = int(nx/3.)
+        idx_end = int(2*nx/3.)
+        response_s[idx_str:idx_end] = 1./float(len(response_s[idx_str:idx_end]))
+    
+    
+    
+    ###########################################################################
+    #First, need we calculate the quad (analysis, forecast, blind forecast)
+    #A precomputed quad_state can be supplied to quickly use different response functions
+    ###########################################################################
+    if type(quad_state)== type(None):
+        quad_state = single_step_analysis_forecast_v2(background,truth,da_const,m_const,obs_seed=obs_seed)
+
+    ###########################################################################
+    # Next we need the response functions. 
+    # We only really need the forecast and blind forecast, but for fun I'll calculate all for now
+    ###########################################################################
+
+    #For now I am not worried about being efficient
+    nobs = len(da_const["obs_loc"])
+    obs = np.arange(nobs)
+    nens = da_const["nens"]
+    nstate = len(truth)
+
+    bf_response = np.zeros(nens)
+    fc_response = np.zeros(nens)
+    an_response = np.zeros(nens)
+    bg_response = np.zeros(nens)
+    for n in range(da_const["nens"]):
+        if abs_flag==0:
+            bf_response[n] = np.sum(response_s * np.power(quad_state["bf"][:,n],response_c)) 
+            fc_response[n] = np.sum(response_s * np.power(quad_state["fc"][:,n],response_c))
+            an_response[n] = np.sum(response_s * np.power(quad_state["an"][:,n],response_c))
+            bg_response[n] = np.sum(response_s * np.power(quad_state["bg"][:,n],response_c))
+        if abs_flag==1:
+            bf_response[n] = np.sum(response_s * np.power(np.abs(quad_state["bf"][:,n]),response_c)) 
+            fc_response[n] = np.sum(response_s * np.power(np.abs(quad_state["fc"][:,n]),response_c))
+            an_response[n] = np.sum(response_s * np.power(np.abs(quad_state["an"][:,n]),response_c))
+            bg_response[n] = np.sum(response_s * np.power(np.abs(quad_state["bg"][:,n]),response_c))
+
+    J_dict = {}
+    J_dict['bf']  = bf_response
+    J_dict['bg']  = bg_response
+    J_dict['an']  = an_response
+    J_dict['fc']  = fc_response
+    J_dict['tr_bg']  = np.sum(response_s * np.power(quad_state["tr_bg"][:],response_c))
+    J_dict['tr_fc']  = np.sum(response_s * np.power(quad_state["tr_fc"][:],response_c))
+    if abs_flag==1:
+        J_dict['tr_bg']  = np.sum(response_s * np.power(np.abs(quad_state["tr_bg"][:]),response_c))
+        J_dict['tr_fc']  = np.sum(response_s * np.power(np.abs(quad_state["tr_fc"][:]),response_c))
+
+    ###########################################################################
+    # Creating the R,H, and C matrices we will need for the VR estimate 
+    ###########################################################################
+
+    if m_const['model'] == 'LA':
+        H = np.identity(m_const["nx"])[da_const["obs_loc"],:]                   # Observation operator
+        R = da_const["used_std_obs"]**2*np.identity(len(da_const["obs_loc"]))   # Observation error covariance matrix
+    if m_const['model'] == 'SWM':
+        H = np.identity(m_const["nx"]*3)[da_const["obs_loc"],:]                 # Observation operator
+        # Bug fix (assumed intent): select the error std by observation location, not by the error value itself
+        obs_loc_arr = np.asarray(da_const["obs_loc"])
+        obs_error_vec = np.ones(len(obs_loc_arr))*da_const['used_h_std_obs']
+        obs_error_vec[obs_loc_arr>=m_const['nx']]   = da_const['used_u_std_obs']
+        obs_error_vec[obs_loc_arr>=2*m_const['nx']] = da_const['used_r_std_obs']
+        R = np.diag(obs_error_vec**2)
+
+    if da_const["loc"]:
+        C = loc_matrix(da_const,m_const)
+    else:
+        C=np.ones([m_const["nx"],m_const["nx"]])
+    if m_const['model'] == 'SWM':
+        C = np.hstack([C,C,C])
+        C = np.vstack([C,C,C])
+    
+
+    x = quad_state['bg'][:,:]
+
+    dx = x.T-np.mean(x,axis=1)
+    dx = dx.T
+
+    A = np.dot(dx,dx.T)/(dx.shape[1]-1)
+
+    #Now we need the vector of individual js for all ensembles
+    J = np.zeros_like(dx)
+    for n in range(da_const["nens"]):
+        if abs_flag==0:
+            J[:,n] = response_s * np.power(quad_state["bf"][:,n],response_c)
+        if abs_flag==1:
+            J[:,n] = response_s * np.power(np.abs(quad_state["bf"][:,n]),response_c)
+        #J[:,n] = response_s * np.power(quad_state["bg"][:,n],response_c)
+    dJ      = J.T-np.mean(J,axis=1)
+    dJ      = dJ.T
+
+    dJ_sum = bf_response - np.mean(bf_response)
+
+
+    H = np.identity(m_const["nx"])[da_const["obs_loc"],:]                              # Observation operator    
+    R = da_const["used_std_obs"]**2*np.identity(len(da_const["obs_loc"]))           # Observation error corvariance matrix
+
+
+    R_obs = R # Observation error covariance matrix
+    H_obs = H # Not the cleanest coding I know
+
+
+    HAHt = np.dot(H_obs,np.dot(C*A,H_obs.T))
+    HAHtRinv= np.linalg.inv(HAHt+R_obs)
+
+    dJHdxt           =  np.dot(dJ_sum,np.dot(H_obs,dx).T)/(nens-1)
+    CdJdX = np.zeros(m_const['nx'])
+    
+    #Since we no longer need C for the HCAHT which was calculated above, we can just advect the C matrix for the correlations and not make a new one. 
+    if advect_flag > 0:
+        if advect_flag ==1:
+            u_advect =m_const['u_ref']*error_u/100.
+            C[:,0] = semi_lagrangian_advection(C[:,0],m_const['dx'],u_advect,da_const['dt'])
+        #for c in range(C.shape[0]):
+        #    C[c,:] = semi_lagrangian_advection(C[c,:],m_const['dx'],-m_const['u_ref'],da_const['dt'])
+    
+        if advect_flag ==2:
+            C_adv = C[:,0]*0.
+            if random_samples == 0:
+                for i in range(da_const["nens"]):
+                    if da_const["fixed_seed"]==True: np.random.seed(i)
+                    u_ens      = np.random.normal(m_const["u_ref"],da_const["u_std_ens"])
+                    u_ens      = m_const["u_ref"]*error_u/100.+(u_ens-m_const["u_ref"])*error_u_ens/100.
+                    C_adv = C_adv + semi_lagrangian_advection(C[:,0],m_const['dx'],u_ens,da_const['dt'])
+                C[:,0] = C_adv/float(da_const["nens"])
+            else:   
+                np.random.seed(random_samples+seed_samples)
+                randomized_ens = np.random.choice(np.arange(0,da_const["nens"]), random_samples, replace=False)
+                for r in randomized_ens:
+                    if da_const["fixed_seed"]==True: np.random.seed(r)
+                    u_ens      = np.random.normal(m_const["u_ref"],da_const["u_std_ens"])
+                    u_ens      = m_const["u_ref"]*error_u/100.+(u_ens-m_const["u_ref"])*error_u_ens/100.
+                    C_adv = C_adv + semi_lagrangian_advection(C[:,0],m_const['dx'],u_ens,da_const['dt'])
+                C[:,0] = C_adv/float(random_samples)
+        
+        for i in range(1,m_const['nx']):
+            C[:,i] = np.hstack([C[-1,i-1],C[:-1,i-1]])
+    
+    
+    #for n in range(m_const["nx"]): #Not sure if the if loop is much quicker than just calculating the dot product everywhere ;) 
+    #    if response_s[n] != 0:        CdJdX += C[n,:]*np.dot(dJ[n,:],dx.T)/(da_const['nens']-1)
+    # Somewhat quicker way that avoids the loop
+    CdJdX = np.sum(C*np.dot(dJ,dx.T)/(da_const['nens']-1.),axis=0)
+    
+    CdJdXH = np.dot(CdJdX,H.T)
+    vr_total = - np.dot(CdJdXH,np.dot(HAHtRinv,dJHdxt).T)
+    J_fc= fc_response
+    dJ_fc = J_fc-np.mean(J_fc)
+    real_reduction=np.var(dJ_fc,ddof=1) - np.var(bf_response,ddof=1)
+
+        
+    return vr_total,real_reduction,quad_state,J_dict#,C
+
+
+#==========================================================
+#  22 version with satelitte data
+#==========================================================
+
+
+
+def LETKF_analysis_22(bg,obs,obs_sat,m_const,da_const,sat_operator):
+    """
+    Revised version of LETKF_analysis that allows for satellite observations.
+
+    Follows the recipe and notation of Hunt et al. (2007), e.g. what is normally called B is now P_a, and _ol refers to "overline", i.e. the mean.
+    P_tilde_a refers to B but in ensemble space.
+    The application when not localized is pretty straightforward.
+    The localized version is coded maximally inefficiently. If needed, two available options to speed it up are parallelization and batch processing.
+    Currently also only uses Gaspari-Cohn localization.
+    """
+    from scipy.linalg import sqrtm #Needed to calculate the matrix square root. Can lead to complex values due to numerical noise, which is dealt with by only using the real part.
+
+
+
+    n_obs_h  =len(da_const["obs_loc"])
+    n_obs_sat =len(da_const["obs_loc_sat"])
+    n_obs = n_obs_h + n_obs_sat
+    obs_loc = np.hstack([da_const["obs_loc"],da_const["obs_loc_sat"]]).astype(int)
+    
+    
+    L = da_const['nens']
+    x_b = bg
+    x_ol_b = np.mean(x_b,axis=1)
+    X_b = x_b.T-x_ol_b
+    X_b = X_b.T
+
+    R = da_const['R']
+   
+    # this is the function that ports the background ensemble state to pertubations in observation space
+    Y_b, y_ol_b = state_to_observation_space(bg,m_const,da_const,sat_operator)
+    
+    # inelegant way to merge the obs vector depending on which observations occur
+    if n_obs_h>0:  y_obs = obs[da_const["obs_loc"]]
+    if n_obs_sat>0: sat_obs = obs_sat[da_const["obs_loc_sat"]]
+    if n_obs_h>0 and n_obs_sat>0 :  y_obs = np.hstack([y_obs,sat_obs])
+    if n_obs_sat>0 and n_obs_h==0 : y_obs   = sat_obs
+    
+    delta_y = y_obs-y_ol_b
+        
+    if da_const['loc']==False:
+        """ Now that all the variables are set, we start by computing the covariance matrix in ensemble state """
+        YRY = np.dot(Y_b.T,np.dot(np.linalg.inv(R),Y_b))
+        P_tilde_a = np.linalg.inv((L-1)*np.identity(L)+YRY)
+
+        """Next step, computing the enesemble mean analysis via the weighting vector w_ol_a"""
+        w_ol_a = np.dot(P_tilde_a,np.dot(Y_b.T,np.dot(np.linalg.inv(R),delta_y)))
+        x_ol_a = x_ol_b+np.dot(X_b,w_ol_a)
+
+        """We now get the ensemble by calculating the weighting matrix through a square root of the error covariance matrix, and adding the mean values to the ensemble deviations"""
+        W_a =  np.real(sqrtm((L-1)*P_tilde_a))
+        w_a = W_a+w_ol_a
+
+        x_a =  np.dot(X_b,w_a).T+ x_ol_a
+        x_a = x_a.T
+
+    else:
+        x_grid_neg = -m_const['x_grid'][::-1]-m_const['dx']
+        x_grid_ext = m_const['x_grid']+(m_const['dx']*m_const['nx'])
+        N = m_const['nx']
+        x_a_loc = np.zeros((N,L))
+        x_ol_a_loc = np.zeros((N))
+        for g in range(N):
+            dist_reg = np.abs(m_const['x_grid'][obs_loc]-m_const['x_grid'][g])
+            dist_neg = m_const['x_grid'][g]-x_grid_neg[obs_loc]
+            dist_ext = x_grid_ext[obs_loc]-m_const['x_grid'][g]
+            dist = np.minimum(dist_reg,dist_ext) # np.minimum compares two arrays at a time, so chain two calls
+            dist = np.minimum(dist,dist_neg)
+            #And now we calculate the Gaspari-Cohn weighting and apply it to the inverse of R
+            gc_loc = gaspari_cohn_non_mirrored(dist,da_const['loc_length'])
+            if np.max(gc_loc) == 0.:
+                #If no observations are within the double gc radius no need to bother computing things
+                x_ol_a_loc[g] = x_ol_b[g]
+                x_a_loc[g,:]  = x_b[g,:]
+            else:
+                R_inv =  np.linalg.inv(R) * np.diag(gc_loc)
+
+                YRY = np.dot(Y_b.T,np.dot(R_inv,Y_b))
+                """ Now that all the variables are set, we start by computing the covariance matrix in ensemble state """
+                P_tilde_a = np.linalg.inv((L-1)*np.identity(L)+YRY)
+
+                """Next step, computing the enesemble mean analysis via the weighting vector w_ol_a"""
+                w_ol_a = np.dot(P_tilde_a,np.dot(Y_b.T,np.dot(R_inv,delta_y)))
+                x_ol_a = x_ol_b+np.dot(X_b,w_ol_a)
+                W_a = np.real(sqrtm((L-1.)*P_tilde_a))
+                w_a = W_a+w_ol_a
+                x_a =  np.dot(X_b,w_a).T+ x_ol_a
+                x_a = x_a.T
+
+                x_ol_a_loc[g] = x_ol_a[g]
+                x_a_loc[g,:]    = x_a[g,:]
+        x_a = x_a_loc
+        x_ol_a = x_ol_a_loc
+    return x_a, x_ol_a
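+
+# Summary of the unlocalized branch above in the notation of Hunt et al. (2007),
+# using the variable names from the code:
+#     P_tilde_a = inv( (L-1) I + Y_b^T R^-1 Y_b )        # B in ensemble space
+#     w_ol_a    = P_tilde_a Y_b^T R^-1 (y_obs - y_ol_b)  # mean weight vector
+#     W_a       = sqrtm( (L-1) P_tilde_a )               # deviation weights
+#     x_a       = x_ol_b + X_b (w_ol_a + W_a)            # analysis ensemble
+# The localized branch repeats this per grid point with R^-1 tapered by the
+# Gaspari-Cohn weights.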
+
+
+
+def state_to_observation_space(X,m_const,da_const,sat_operator):
+    """
+    Computes Y_b, i.e. the ensemble transferred to observation space, when a satellite operator is used.
+    
+    Should be used for LETKF, as well as for the variance reduction estimate using the modified Kalman gain
+    
+    Important, X must not be the ensemble deviations, but the full ensemble state
+    
+    Returns the ensemble deviations as well as the mean
+    
+    """
+    if np.max(np.mean(X,axis=1))<1e-10:
+        print('I think you accidentally used the ensemble deviations when calculating from state to observation space')
+    n_obs_h  =len(da_const["obs_loc"])
+    n_obs_sat =len(da_const["obs_loc_sat"])
+    n_obs = n_obs_h + n_obs_sat
+    
+    if n_obs_h>0:
+        H = np.identity(m_const["nx"])[da_const["obs_loc"],:]                              # Observation operator    
+        y_b = np.dot(H,X)
+
+    if n_obs_sat>0:
+        #here we will use sat instead of Y and y
+        sat_b = sat_operator(X)[da_const["obs_loc_sat"],:]
+
+    if n_obs_h>0 and n_obs_sat>0 :
+        y_b = np.hstack([y_b.T,sat_b.T])
+        y_b = y_b.T
+
+    if n_obs_sat>0 and n_obs_h==0 :
+        y_b     = sat_b 
+    
+    y_ol_b =np.mean(y_b,axis=1) 
+    Y_b = y_b.T-y_ol_b
+    Y_b = Y_b.T
+
+    return Y_b, y_ol_b
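+
+# Shape sketch: with nx grid points, nens members and n_obs = n_obs_h + n_obs_sat,
+#
+#     Y_b, y_ol_b = state_to_observation_space(bg, m_const, da_const, sat_operator)
+#     # Y_b.shape    == (n_obs, nens)  ensemble deviations in observation space
+#     # y_ol_b.shape == (n_obs,)       observation-space ensemble mean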
+
+
+
+def Kalman_gain_observation_deviations(bg,m_const,da_const,sat_operator):
+    """
+    Calculates the Kalman gain, but using YY^T instead of HBH^T and XY^T instead of BH^T.
+    This is important for satellite data because no linearized version of the satellite operator is needed.
+    """
+    n_obs_h  =len(da_const["obs_loc"])
+    n_obs_sat =len(da_const["obs_loc_sat"])
+    n_obs = n_obs_h + n_obs_sat
+    obs_loc = np.hstack([da_const["obs_loc"],da_const["obs_loc_sat"]]).astype(int)
+    L = da_const['nens']
+
+    dY_b, y_ol_b = state_to_observation_space(bg,m_const,da_const,sat_operator)
+    x_b = bg
+    x_ol_b = np.mean(x_b,axis=1)
+    X_b = x_b.T-x_ol_b
+    X_b = X_b.T
+   
+    R = da_const['R']
+    if da_const['loc']:
+        L_obs, L_obs_state = localization_matrices_observation_space(m_const,da_const)
+        YYlocR_inv = np.linalg.inv(L_obs*np.dot(dY_b,dY_b.T)/(L-1)+R)
+        K = L_obs_state*np.dot(X_b,np.dot(dY_b.T,YYlocR_inv))/(L-1)
+    else:
+        YYR_inv = np.linalg.inv(np.dot(dY_b,dY_b.T)/(L-1)+R)
+        K = np.dot(X_b,np.dot(dY_b.T,YYR_inv))/(L-1)
+    return K
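+
+# The gain computed above, written out with the deviations X_b and dY_b:
+#     K = [ X_b dY_b^T / (L-1) ] [ dY_b dY_b^T / (L-1) + R ]^-1
+# i.e. XY^T replaces BH^T and YY^T replaces HBH^T, optionally tapered by the
+# localization matrices L_obs_state and L_obs.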
+
+
+def square_root_Kalman_gain_observation_deviations(bg,m_const,da_const,sat_operator):
+    """
+    Calculates the modified square root Kalman gain, but using YY^T instead of HBH^T and XY^T instead of BH^T.
+    This is important for satellite data because no linearized version of the satellite operator is needed.
+    """
+    from scipy.linalg import sqrtm #Needed to calculate the matrix square root. Can lead to complex values due to numerical noise, which is dealt with by only using the real part.
+    n_obs_h  =len(da_const['obs_loc'])
+    n_obs_sat =len(da_const["obs_loc_sat"])
+    n_obs = n_obs_h + n_obs_sat
+
+    obs_loc = np.hstack([da_const["obs_loc"],da_const["obs_loc_sat"]]).astype(int)
+    L = da_const['nens']
+
+    dY_b, y_ol_b = state_to_observation_space(bg,m_const,da_const,sat_operator)
+    x_b = bg
+    x_ol_b = np.mean(x_b,axis=1)
+    X_b = x_b.T-x_ol_b
+    X_b = X_b.T
+    R = da_const['R']
+    
+    
+    #Trying to do this directly from the formula because Tanya's implementation works but slightly confuses me.
+    if da_const['loc']:
+        L_obs, L_obs_state = localization_matrices_observation_space(m_const,da_const)
+    else:
+        L_obs = np.ones([n_obs,n_obs])
+        L_obs_state = np.ones([m_const['nx'],n_obs])  # bug fix: n_x was undefined
+        
+    YYR = L_obs*np.dot(dY_b,dY_b.T)/(L-1)+R
+    XY  = L_obs_state*np.dot(X_b,dY_b.T)/(L-1)
+    # Splitting the last two factors of the modified Kalman gain into terms I and II
+    I = (np.linalg.inv(sqrtm(YYR))).T 
+    II = np.linalg.inv(sqrtm(YYR)+sqrtm(R))
+    K = np.dot(XY,np.dot(I,II))
+    
+    return K
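+
+# The modified gain computed above (Whitaker and Hamill 2002), with
+# YYR = HBH^T + R and XY = BH^T estimated from the ensemble deviations:
+#     K_sq = XY [ (sqrtm(YYR))^T ]^-1 [ sqrtm(YYR) + sqrtm(R) ]^-1
+# Applied to the deviations only, it produces the correct analysis spread
+# without perturbing the observations.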
+
+
+def localization_matrices_observation_space(m_const,da_const):
+    """
+    Creates the two localization matrices needed to calculate the localized Kalman gain with observation-space ensemble perturbations.
+    
+    The distance calculation is still far from elegant, but there is little point in speeding it up while the LETKF remains the numerical bottleneck.
+    """
+    n_obs_h  =len(da_const["obs_loc"])
+    n_obs_sat =len(da_const["obs_loc_sat"])
+    n_obs = n_obs_h + n_obs_sat
+    obs_loc = np.hstack([da_const["obs_loc"],da_const["obs_loc_sat"]]).astype(int)
+    L_obs = np.zeros([n_obs,n_obs])
+    L_obs_state = np.zeros([m_const['nx'],n_obs])
+    
+    x_grid_neg = -m_const['x_grid'][::-1]-m_const['dx']
+    x_grid_ext = m_const['x_grid']+(m_const['dx']*m_const['nx'])
+
+    for o in range(n_obs):
+        g = obs_loc[o] 
+        # First the one using the distance between observations and the state 
+        dist_reg = np.abs(m_const['x_grid']-m_const['x_grid'][g])
+        dist_neg = m_const['x_grid'][g]-x_grid_neg
+        dist_ext = x_grid_ext-m_const['x_grid'][g]
+        dist = np.minimum(dist_reg,dist_ext) # np.minimum compares two arrays at a time, so chain two calls
+        dist = np.minimum(dist,dist_neg)
+        gc_loc = gaspari_cohn_non_mirrored(dist,da_const['loc_length'])
+        L_obs_state[:,o] = gc_loc
+        
+        
+        L_obs[o,:] = L_obs_state[obs_loc,o]
+       
+    return L_obs, L_obs_state
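+
+# Shape sketch of the two matrices returned above:
+#     L_obs       : (n_obs, n_obs)  tapers dY_b dY_b^T (obs-obs distances)
+#     L_obs_state : (nx, n_obs)     tapers X_b dY_b^T  (state-obs distances)
+# Both use the same periodic Gaspari-Cohn weighting as the LETKF branch.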
+    
+def generate_obs_22_single(truth,m_const,da_const,sat_operator,obs_seed):
+    """
+    Generates the observations for the single forecast analysis, including the sat obs, for a provided seed
+    
+    """
+    np.random.seed(obs_seed)
+    obs = truth + np.random.normal(0,da_const["True_std_obs"],m_const["nx"])
+    if len(da_const['obs_loc_sat'])>0:
+        truth_sat = sat_operator(truth)
+        obs_sat = truth_sat + np.random.normal(0,da_const["True_std_obs_sat"],m_const["nx"]) 
+    else:
+        obs_sat = np.zeros(m_const['nx'])
+        
+    return obs, obs_sat
+
+def single_step_analysis_forecast_22(background,truth,da_const,m_const,sat_operator,model_seed=0,obs_seed=0):
+    """
+    Revised version of single_step_analysis_forecast that also handles sat data.
+
+    For now I will remove the SWM stuff, which can still be found in the older version
+    """
+    obs, obs_sat = generate_obs_22_single(truth,m_const,da_const,sat_operator,obs_seed) 
+    
+    #Getting the analysis
+    if da_const['method'] == 'EnKF':
+        np.random.seed(obs_seed+100000)
+        an = ENKF_analysis_22(background,obs,obs_sat,da_const,m_const,sat_operator)
+    
+    if da_const['method'] == 'LETKF':
+        an,bla = LETKF_analysis_22(background,obs,obs_sat,m_const,da_const,sat_operator)
+    
+    if da_const['method'] == 'sqEnKF':
+        an = sqEnKF_analysis_22(background,obs,obs_sat,da_const,m_const,sat_operator)
+
+    
+    
+
+
+    """
+    Predict blind forecast and forecast
+    """
+    bf = np.zeros_like(background)
+    fc = np.zeros_like(background)
+    
+    for i in range(da_const["nens"]):
+        np.random.seed(i+model_seed)
+        u_ens      = np.random.normal(m_const["u_ref"],da_const["u_std_ens"])
+        dhdt_ens   = np.random.normal(m_const["dhdt_ref"],da_const["dhdt_std_ens"])
+        bf[:,i]    = linear_advection_model(background[:,i],u_ens,dhdt_ens,m_const["dx"],da_const["dt"],da_const["nt"])
+        fc[:,i]    = linear_advection_model(an[:,i],u_ens,dhdt_ens,m_const["dx"],da_const["dt"],da_const["nt"])
+
+    
+    """
+    create dictionary to store this single step 
+    """
+    quad_state = {}
+    quad_state['bg'] = background
+    quad_state['an'] = an
+    quad_state['bf'] = bf
+    quad_state['fc'] = fc
+    quad_state['tr_bg'] = truth
+    quad_state['obs'] = obs
+    return quad_state
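+
+# Usage sketch: one isolated assimilation step on a given background ensemble,
+# so that different methods or seeds can be compared on identical inputs.
+#
+#     quad = single_step_analysis_forecast_22(background, truth, da_const,
+#                                             m_const, sat_operator, obs_seed=7)
+#     # quad['an']: analysis, quad['fc']: forecast from the analysis,
+#     # quad['bf']: blind forecast from the background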
+
+
+def create_states_dict_22(j,states,m_const,da_const,sat_operator):
+    """
+    Generates the initial analysis and truth.
+    Also creates the "states" dictionary where the analysis ensemble, background ensemble, truth and observations are all going to be stored. 
+    Very memory hungry, as everything from all assimilation time steps and all experiments is stored. 
+    Works fine for the simple model though.
+
+    A fixed seed is used by default so that the model errors of all ensemble members are constant. But this can also be randomized.
+    An alternative would be to generate the model errors and store them, but this has not happened yet. It would be necessary for parameter estimation tests.
+
+    Modified version of the Yvonne setup to work with the linear advection model.
+
+    Todo:
+    - Describe the states dictionary here.
+    - Make the model errors stored variables to enable parameter estimation.
+    - Maybe use a more sensible name, such as init_da_ensembles.
+    - I am not convinced this is the best way to generate the initial analysis.
+    - Might be best to change to a dictionary time axis. Forecast and analysis at the same time differ by 1 in the time index.
+    """
+    nx = m_const["nx"]
+
+    #initial conditions
+    if m_const['init_func']=='gaus': h_initial = gaussian_initial_condition(m_const["x_grid"],m_const["h_init_std"])
+    if m_const['init_func']=='sine': h_initial = sine_initial_condition(    m_const["x_grid"],m_const["sine_init"])
+    
+
+    #Generate truth and first observations
+    truth, obs, obs_sat = generate_obs_22(h_initial,h_initial,m_const,da_const,j,0,sat_operator)
+    
+    
+    an = np.zeros((nx,da_const["nens"]))
+    
+    #First rows full of nans :)
+    bg = np.zeros((nx,da_const["nens"]))
+    bg[:] = np.nan
+    obs = np.zeros(nx)
+    obs[:] = np.nan
+    
+    for i in range(da_const["nens"]):
+        np.random.seed(i+j*da_const["nens"]+da_const['ens_seed'])
+        if da_const["init_noise"]>0:
+            h_ens  = np.random.normal(h_initial,da_const["init_noise"])
+        else: 
+            h_ens  = h_initial
+        if da_const["init_spread"]>0:
+            #initial spread generated by moving waves forward and backward and up and down using 
+            #da_const["init_spread_h"] and da_const["init_spread_x"]
+            x_displace = np.random.normal(0.,da_const["init_spread_x"])
+            h_displace = np.random.normal(0.,da_const["init_spread_h"])
+            u_tmp = x_displace/da_const["dt"]
+            h_ens    = semi_lagrangian_advection(h_ens,m_const["dx"],u_tmp,da_const["dt"])
+            h_ens    = h_ens+h_displace
+            
+        u_ens      = np.random.normal(m_const["u_ref"],da_const["u_std_ens"])
+        dhdt_ens   = np.random.normal(m_const["dhdt_ref"],da_const["dhdt_std_ens"])
+        an[:,i]    = linear_advection_model(h_ens,u_ens,dhdt_ens,m_const["dx"],da_const["dt"],da_const["nt"])
+
+    states[j]={}
+    states[j]['bg']=[bg]
+    states[j]['an']=[an]
+    states[j]['truth']=[truth]
+    states[j]['obs']=[obs]
+    states[j]['obs_sat']=[obs_sat]
+    return an, truth, states
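+
+# Initialization sketch, as used at the top of run_linear_advection_KF_22:
+#
+#     analysis, truth, states = create_states_dict_22(j, states, m_const,
+#                                                     da_const, sat_operator)
+#     # states[j]['bg'][0] and states[j]['obs'][0] start as NaN placeholders
+#     # ("first rows full of nans"), while 'an', 'truth' and 'obs_sat' hold values.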
+
+
+def vr_reloaded_22(background,truth,m_const,da_const,sat_operator,
+                func_J=sum_mid_tri,
+                reduc = 1,reg_flag=1,
+                quad_state = None,dJdx_inv=None,alpha=None,mismatch_threshold=0.1,
+                iterative_flag=0,explicit_sens_flag = 1,exp_num=0,obs_seed=0,model_seed=0):
+
+    """
+    Version of vr_reloaded that can now also use sat obs.
+    Takes only the background and truth, then calculates the quad, and finally calculates the response function and the variance reduction.
+    
+    The quad can be supplied to save time, which makes sense when comparing different methods for the same experiment.
+    
+    Should also return the dJ values of all 4 ensembles (analysis, background, forecast, blind forecast)
+    
+    
+    -reg_flag added to use L2 regularization
+    
+    not implemented:
+    - state model reduction
+    
+    
+    Speed up options: 
+    - If the same forecast can be recycled, the quad can be calculated once and then passed on.
+    - If the sensitivity can be recycled, that can be calculated once and then reused. 
+    - Don't use LETKF
+
+    First getting the explicit all-at-once version working, will then add the various approaches.
+    
+    to be implemented:
+    -iteratively and all at once
+    -explicit vs implicit 
+    -reduced model spacing by subsampling the grid. Reduc is the spacing, e.g. reduc 3 means every third grid point is used;
+     should find the nearest reduced model grid point for each observation.
+
+    
+
+    """
+    if iterative_flag ==0: from scipy.linalg import sqrtm
+    ###########################################################################
+    #First, we calculate the quad (analysis, forecast, blind forecast)
+    #A precomputed quad_state can be supplied to quickly use different response functions
+    ###########################################################################
+    if quad_state is None:
+        quad_state = single_step_analysis_forecast_22(background,truth,da_const,m_const,sat_operator,obs_seed=obs_seed,model_seed=model_seed)
+    
+    ###########################################################################
+    # Next we need the response functions. 
+    # We only really need the forecast and blind forecast, but for fun I'll calculate all for now
+    ###########################################################################
+    
+    #For now I am not worried about being efficient
+    nobs = len(da_const["obs_loc"])
+    obs = np.arange(nobs)
+    nens = da_const["nens"]
+    nstate = len(truth)
+
+    bf_response = np.zeros(nens)
+    fc_response = np.zeros(nens)
+    an_response = np.zeros(nens)
+    bg_response = np.zeros(nens)
+    for n in range(da_const["nens"]):
+        bf_response[n] = func_J(quad_state["bf"][:,n])
+        fc_response[n] = func_J(quad_state["fc"][:,n])
+        an_response[n] = func_J(quad_state["an"][:,n])
+        bg_response[n] = func_J(quad_state["bg"][:,n])
+        
+    J_dict = {}
+    J_dict['bf']  = bf_response
+    J_dict['bg']  = bg_response
+    J_dict['an']  = an_response
+    J_dict['fc']  = fc_response
+    J_dict['tr_bg']  = func_J(quad_state['tr_bg'])
+    
+    
+    
+    
+    
+
+
+    #if reduc>1: #Currently does not work
+    #    #Defining reduced model domain/state, and then defining obs location on new reduced grid
+    #    #For the SWM model this only really makes sense if nx is a multiple of reduc 
+    #    reduced_model_domain = np.arange(0,nstate,reduc)
+    #    reduced_model_size = len(reduced_model_domain)
+    #    reduced_obs_loc = np.zeros(nobs).astype(int)
+    #    for o in range(nobs):
+    #        reduced_obs_loc[o] = (np.abs(reduced_model_domain-da_const['obs_loc'][o])).argmin()
+    #    #print('reduced_model_domain:',reduced_model_domain)
+    #    #print('reduced_model_size  :',reduced_model_size  )
+    #    #print('reduced_obs_loc     :',reduced_obs_loc     )
+    #        
+    #    #Getting the localization matrix in reduced model space
+    #    H = np.identity(m_const["nx"])[reduced_model_domain,:]   
+    #    C = np.dot(H,np.dot(C,H.T))
+    #    x = quad_state['bg'][reduced_model_domain,:]
+    #    
+    #else:
+    x = quad_state['bg'][:,:]
+        
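+    #Ensemble deviations dx (nstate x nens); A below is the sample covariance dx dx^T/(nens-1)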
+    dx = x.T-np.mean(x,axis=1)
+    dx = dx.T
+    dx_orig = dx+0
+    
+    A = np.dot(dx,dx.T)/(dx.shape[1]-1)
+
+    J  = bf_response
+    dJ      = J-np.mean(J)
+    dJ_orig = J-np.mean(J)
+
+
+    ###############################################################################################
+    # Sensitivity
+    ###############################################################################################
+    #Covariance between response function dJ and state ensemble dx
+    cov_dJdx_vec = np.dot(dJ,dx.T)/(dx.shape[1]-1)
+
+    if explicit_sens_flag==1:
+        #If a sensitivity is provided it is used instead of calculating it 
+        if type(dJdx_inv) == np.ndarray:
+            #testing supplied sensitivity
+            rel_error_sens = np.sum(np.abs(A.dot(dJdx_inv)-cov_dJdx_vec))/np.sum(np.abs(cov_dJdx_vec))
+            if rel_error_sens>0.05:
+                print('using supplied sensitivity has a relative error of:',rel_error_sens)
+        #Computing the sensitivity; using the regularized version is highly recommended. 
+        else:
+            if reg_flag == 1:
+                dJdx_inv = L2_regularized_inversion(A,cov_dJdx_vec,alpha=alpha,mismatch_threshold=mismatch_threshold)
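+                #A minimal sketch of what such a ridge inversion could look like, assuming
+                #L2_regularized_inversion solves (A + alpha*I) s = cov for the sensitivity s:
+                #    s = np.linalg.solve(A + alpha*np.identity(A.shape[0]), cov_dJdx_vec)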
+            else:
+                A_inv = np.linalg.pinv(A)
+                dJdx_inv = np.dot(A_inv,cov_dJdx_vec)
+
+    estimated_J = bf_response + 0.
+    
+    
+    #if iterative_flag ==0:
+        
+    vr_individual = 0.
+    
+    
+    if explicit_sens_flag ==1:
+        #Tanja's approach of calculating the square-root Kalman gain K, following formula (10) of Whitaker and Hamill 2002
+        sqK = square_root_Kalman_gain_observation_deviations(x,m_const,da_const,sat_operator)
+        bg_obs_deviations,bg_obs_ol = state_to_observation_space(x,m_const,da_const,sat_operator)
+        dx_prime = dx - np.dot(sqK,bg_obs_deviations)
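+        #Deviation update dx' = dx - sqK*(H dx): the square-root gain contracts the spread without perturbing the observations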
+        
+        #estimated J calculated by updating the original dJ
+        estimated_J = estimated_J -np.dot(dJdx_inv,np.dot(sqK,bg_obs_deviations))
+        
+        #Using the cheaper variance calculation instead of explicitly forming A'-A
+        new_J = np.dot(dJdx_inv.T,dx_prime)
+        vr_total = np.var(new_J,ddof=1)-np.var(np.dot(dJdx_inv.T,dx),ddof=1)
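+        #Estimated variance reduction Var(s^T dx') - Var(s^T dx) with sensitivity s = dJdx_inv; negative values mean the update tightens J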
+        #print('all at once:',vr_individual)
+        dx = dx_prime
+    
+    #if explicit_sens_flag ==0: 
+    #    HAHt = np.dot(H_obs,np.dot(C*A,H_obs.T))
+    #    HAHtRinv= np.linalg.inv(HAHt+R_obs)
+
+    #    dJHdxt           =  np.dot(dJ,np.dot(H_obs,dx).T)/(nens-1)
+    #    vr_individual = -np.dot(dJHdxt,np.dot(HAHtRinv,dJHdxt))
+    #    vr_total = vr_individual
+    #
+    #
+    #if iterative_flag:
+    #    vr_individual = np.zeros(nobs)
+    #    for o in range(nobs):  #loop over each observation individually
+
+    #        #New A after dx was updated 
+    #        A = np.cov(dx,ddof=1)
+
+    #        #Selecting the single R value for this observation
+    #        R_obs = R[o,o]*np.identity(1)           # Observation error corvariance matrix
+    #        H_obs = H[o,:]   #Not sure about this :(
+    #        #if 
+    #        ##H_rms = np.identity(nobs)[o,:] 
+    #        #H_rms = np.identity(reduced_model_size)[reduced_obs_loc[o],:] 
+    #        ##H = np.identity(m_const["nx"])[da_const["obs_loc"][o],:]
+#
+    #        ##HAHt = np.dot(H,np.dot(A,H.T))
+    #        ##HAHtRinv= np.linalg.inv(HAHt+R)
+    #        ##dJHdxt = np.dot(dJ,np.dot(H,dx).T)/(nens-1)
+    #        
+    #        #Now we get the change in dx, which uses the localized A matrix and a square root 
+    #        E = np.matmul(C*A,H_obs.T)
+    #        E = np.matmul(H_obs,E)
+    #        E = E + R_obs
+    #        alpha = 1./(1.+np.sqrt(R_obs/E))
+
+
+    #        HAHt = np.dot(H_obs,np.dot(C*A,H_obs.T))
+    #        HAHtRinv= np.linalg.inv(HAHt+R_obs)
+    #        #print((C*A).shape,H_obs.T.shape,np.dot(C*A,H_obs.T).shape,HAHtRinv.shape)
+    ###
+
+    #        if explicit_sens_flag ==1: 
+    #        
+    #            K = np.dot(C*A,H_obs.T)*HAHtRinv
+    #            # Update state vector
+    #            Hdx = np.matmul(H_obs,dx)
+    #            dx_prime = dx - alpha*np.outer(K,Hdx)
+    #        
+    #            #Update expected J for each ensemble member
+    #            estimated_J = estimated_J -np.dot(dJdx_inv,alpha*np.outer(K,Hdx))
+
+    #            #A_new = np.dot(dx,dx.T)/(dx.shape[1]-1)
+    #            #Now we get the variance estimate using the difference between new and old B
+    #            #dSigma = np.matmul(A_new-A,dJdx_inv.T)
+    #            #print('wtf: np.sum(np.abs(A_new-A_old))  ',o,np.sum(np.abs(A_new-A)))
+    #            #dSigma = np.matmul(dJdx_inv,dSigma)
+    #            #vr_individual[o] = dSigma#np.matmul(np.atleast_2d(np.diag(A_new-A)),(dJdx_inv.T**2))
+    #            
+    #            vr_individual[o] = np.var(np.dot(dJdx_inv.T,dx_prime),ddof=1)-np.var(np.dot(dJdx_inv.T,dx),ddof=1)
+    #            dx = dx_prime
+    #        if explicit_sens_flag==0:
+    #            dJHdxt           = np.dot(dJ,np.dot(H_obs,dx).T)/(nens-1)
+    #            vr_individual[o] = -np.dot(dJHdxt,np.dot(HAHtRinv,dJHdxt))
+    #            
+    #            #Still needs localization! 
+    #            
+    #            #Now we include dJ into dx to include it in the update
+    #            dxJ = np.vstack([dx,dJ])
+    #            AxJ = np.dot(dxJ,dxJ.T)/(dxJ.shape[1]-1)
+    #            HxJ = np.hstack([H_obs,np.array(0)])
+    #            CxJ = np.ones([nstate+1,nstate+1])
+    #            CxJ[:nstate,:nstate] = C
+    #            HAHt = np.dot(HxJ,np.dot(CxJ*AxJ,HxJ.T))
+    #            HAHtRinv= np.linalg.inv(HAHt+R_obs)
+
+    #            #print(AxJ.shape,CxJ.shape)
+    #            #print(AxJ.shape,HxJ.T.shape,np.dot(AxJ,HxJ.T).shape,HAHtRinv.shape)
+    #            K = np.dot(CxJ*AxJ,HxJ.T)*HAHtRinv
+
+
+    #            # Update state vector
+    #            HdxJ = np.matmul(HxJ,dxJ)
+    #            dxJ = dxJ - alpha*np.outer(K,HdxJ)
+
+    #            dx = dxJ[:-1,:]
+    #            #old_var_dJ = np.var(dJ)
+    #            dJ = dxJ[-1,:]
+    #            estimated_J = dJ+np.mean(bf_response)
+    #            #new_var_dJ = np.var(dJ)
+    #        
+    #    #Final var reduction estimate
+    #    if explicit_sens_flag==0: vr_total=np.sum(vr_individual)
+    #    
+    #    if explicit_sens_flag==1: 
+    #        vr_total=np.var(np.dot(dJdx_inv.T,dx),ddof=1)-np.var(np.dot(dJdx_inv.T,dx_orig),ddof=1)
+    #        
+    #        #Checking different formulation
+
+
+    #
+    
+    J_dict['es'] =  estimated_J
+    
+    J_fc= fc_response
+    dJ_fc = J_fc-np.mean(J_fc)
+    real_reduction=np.var(dJ_fc,ddof=1) - np.var(dJ_orig,ddof=1)
+        
+    return vr_total,vr_individual,real_reduction,J_dict,dJdx_inv,quad_state,dx
+
+def vr_individual_loc_22(background,truth,m_const,da_const,sat_operator,response_func=1,abs_flag=0,
+                quad_state = None,exp_num=0,
+                advect_flag = 0,obs_seed=0,model_seed=0,
+                random_samples=0,seed_samples=0,error_u=100,error_u_ens=100):
+
+    """
+    Based on vr_individual_loc, but adapted to take sat measurements as well.
+    The main purpose is to do variance reduction estimates by individually localizing the covariances of the response functions at each point.
+    
+    important parameters:
+    response_func: controls the response; 1 gives the default sum over the middle third of the domain, 2 the mean over the middle third.
+    abs_flag: determines if the absolute value of the state vector is used for the response function.
+    
+    If advect_flag==1, the localization matrix is advected using the m_const advection speed, which should be close to the ensemble mean. The same advection function is used as in the linear advection model. 
+    If advect_flag==2, the localization matrix is advected using the individual ensemble members, so that in the end the localization vector is broadened according to ensemble dispersion.
+     
+    random_samples=0,seed_samples=0,error_u=100,error_u_ens=100: these are all various ways of adding an error to the advection term.
+    
+ 
+    For now only using the all at once method, and it probably won't work for only a single observation.   
+    
+    The equations should be in the paper draft, but I made quite the mistake by thinking that the A at the end of A'-A = -KHA is also localized.
+    
+    Possible speed ups not used so far:
+    * I could speed this up a bit by only calculating the localized correlations where they will be needed (so where H says they should be), but that is a bit of work to do well, so for now I will leave it out.  
+    * Given that we use Gaspari-Cohn localization most of the time, I might be able to save some time by only calculating covariances where C_i is not zero. But it probably won't make a difference for the toy model.
+    * We currently assign j_i a value at every grid point, even though we skip calculations where response_s = 0. It would be more memory efficient to allocate J_i in a restricted area, but again I think this won't matter in a toy model. 
+    * Possibly the biggest speed-up would be to avoid the loop when calculating the response functions, which I haven't bothered with yet. 
+    
+
+
+    """
+    ##########################################################################
+    #We begin by computing the vectors which determine the response function
+    ###########################################################################
+    response_s = np.zeros(m_const['nx'])
+    response_c = np.ones(m_const['nx'])
+    nx = m_const['nx']
+    if response_func ==1: 
+        idx_str = int(nx/3.)
+        idx_end = int(2*nx/3.)
+        response_s[idx_str:idx_end] = 1
+    if response_func ==2: 
+        idx_str = int(nx/3.)
+        idx_end = int(2*nx/3.)
+        response_s[idx_str:idx_end] = 1./float(len(response_s[idx_str:idx_end]))
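+    #With response_c all ones, response_func=1 gives J = sum(h) over the middle third of the domain, response_func=2 the mean over it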
+    
+    
+    
+    
+    ###########################################################################
+    #First, we need to calculate the quad (analysis, forecast, blind forecast)
+    #A precomputed quad_state can be supplied to quickly use different response functions
+    ###########################################################################
+    if quad_state is None:
+        quad_state = single_step_analysis_forecast_22(background,truth,da_const,m_const,sat_operator,obs_seed=obs_seed,model_seed=model_seed)
+
+    ###########################################################################
+    # Next we need the response functions. 
+    # We only really need the forecast and blind forecast, but for fun I'll calculate all for now
+    ###########################################################################
+
+    #For now I am not worried about being efficient
+    nobs = da_const["n_obs_h"] + da_const["n_obs_sat"]
+    obs = np.arange(nobs)
+    nens = da_const["nens"]
+    nstate = len(truth)
+
+    bf_response = np.zeros(nens)
+    fc_response = np.zeros(nens)
+    an_response = np.zeros(nens)
+    bg_response = np.zeros(nens)
+    for n in range(da_const["nens"]):
+        if abs_flag==0:
+            bf_response[n] = np.sum(response_s * np.power(quad_state["bf"][:,n],response_c)) 
+            fc_response[n] = np.sum(response_s * np.power(quad_state["fc"][:,n],response_c))
+            an_response[n] = np.sum(response_s * np.power(quad_state["an"][:,n],response_c))
+            bg_response[n] = np.sum(response_s * np.power(quad_state["bg"][:,n],response_c))
+        if abs_flag==1:
+            bf_response[n] = np.sum(response_s * np.power(np.abs(quad_state["bf"][:,n]),response_c)) 
+            fc_response[n] = np.sum(response_s * np.power(np.abs(quad_state["fc"][:,n]),response_c))
+            an_response[n] = np.sum(response_s * np.power(np.abs(quad_state["an"][:,n]),response_c))
+            bg_response[n] = np.sum(response_s * np.power(np.abs(quad_state["bg"][:,n]),response_c))
+
+    J_dict = {}
+    J_dict['bf']  = bf_response
+    J_dict['bg']  = bg_response
+    J_dict['an']  = an_response
+    J_dict['fc']  = fc_response
+    if abs_flag==1:
+        J_dict['tr_bg']  = np.sum(response_s * np.power(np.abs(quad_state["tr_bg"][:]),response_c))
+    else:
+        J_dict['tr_bg']  = np.sum(response_s * np.power(quad_state["tr_bg"][:],response_c))
+
+    ###########################################################################
+    # Creating the localization matrix which we will need for the VR estimate 
+    ###########################################################################
+
+
+    x = quad_state['bg'][:,:]
+
+    dx = x.T-np.mean(x,axis=1)
+    dx = dx.T
+    dx_obs,x_obs_ol = state_to_observation_space(x,m_const,da_const,sat_operator)
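+    #dx_obs: background deviations mapped into observation space; x_obs_ol is presumably the obs-space ensemble mean ("overline")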
+
+    A = np.dot(dx,dx.T)/(dx.shape[1]-1)
+
+    #Now we need the vector of individual js for all ensemble members
+    J = np.zeros_like(dx)
+    for n in range(da_const["nens"]):
+        if abs_flag==0:
+            J[:,n] = response_s * np.power(quad_state["bf"][:,n],response_c)
+        if abs_flag==1:
+            J[:,n] = response_s * np.power(np.abs(quad_state["bf"][:,n]),response_c)
+        #J[:,n] = response_s * np.power(quad_state["bg"][:,n],response_c)
+    dJ      = J.T-np.mean(J,axis=1)
+    dJ      = dJ.T
+
+    dJ_sum = bf_response - np.mean(bf_response)
+
+    L_obs, L_obs_state = localization_matrices_observation_space(m_const,da_const)
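+    #L_obs (nobs x nobs): localization between observation pairs; L_obs_state (nstate x nobs): localization between each grid point and each observation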
+
+    # calculating HAHt with the background deviations in obs space
+    HAHt = L_obs*np.dot(dx_obs,dx_obs.T)/(nens-1)
+    HAHtRinv= np.linalg.inv(HAHt+da_const['R'])
+    # Again replacing Hdx with the background deviations in obs space
+    dJHdxt           =  np.dot(dJ_sum,dx_obs.T)/(nens-1)
+
+    #Missing is all the advection handling; for now we use only a prescribed error
+    if advect_flag ==1:
+        u_advect =m_const['u_ref']*error_u/100.
+        for o in range(nobs):
+            L_obs_state[:,o] = semi_lagrangian_advection(L_obs_state[:,o],m_const['dx'],u_advect,da_const['dt'])
+
+
+
+    dJdYT=np.dot(dJ,dx_obs.T)/(nens-1)
+    LdJdYT=np.sum(L_obs_state*dJdYT,axis=0)
+
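+    #Localized VR estimate: the state-localized cov(dJ,dY) is contracted with (HAHt+R)^-1 applied to cov(dJ_sum,dY)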
+    vr_total = - np.dot(LdJdYT,np.dot(HAHtRinv,dJHdxt).T)
+    J_fc= fc_response
+    dJ_fc = J_fc-np.mean(J_fc)
+    real_reduction=np.var(dJ_fc,ddof=1) - np.var(bf_response,ddof=1)
+
+    return vr_total,real_reduction,quad_state,J_dict#,C
+    #return dJ,dx_obs,dx
+
+   # # calculating HAHt with the background deviations in obs space
+   # HAHt = L_obs*np.dot(dx_obs,dx_obs.T)/(nens-1)
+   # HAHtRinv= np.linalg.inv(HAHt+da_const['R'])
+   # 
+   # CdJdX = np.zeros(m_const['nx'])
+   # 
+   # #Since we no longer need C for the HCAHT which was calculated above, we can just advect the C matrix for the correlations and not make a new one. 
+   # if advect_flag > 0:
+   #     if advect_flag ==1:
+   #         u_advect =m_const['u_ref']*error_u/100.
+   #         C[:,0] = semi_lagrangian_advection(C[:,0],m_const['dx'],u_advect,da_const['dt'])
+   #     #for c in range(C.shape[0]):
+   #     #    C[c,:] = semi_lagrangian_advection(C[c,:],m_const['dx'],-m_const['u_ref'],da_const['dt'])
+   # 
+   #     if advect_flag ==2:
+   #         C_adv = C[:,0]*0.
+   #         if random_samples == 0:
+   #             for i in range(da_const["nens"]):
+   #                 if da_const["fixed_seed"]==True: np.random.seed(i)
+   #                 u_ens      = np.random.normal(m_const["u_ref"],da_const["u_std_ens"])
+   #                 u_ens      = m_const["u_ref"]*error_u/100.+(u_ens-m_const["u_ref"])*error_u_ens/100.
+   #                 #print('u_ens',u_ens)
+   #                 C_adv = C_adv + semi_lagrangian_advection(C[:,0],m_const['dx'],u_ens,da_const['dt'])
+   #                 #C_adv[:,0] = C_adv[:,0] + semi_lagrangian_advection(C[:,0],m_const['dx'],10.,da_const['dt'])
   #             C[:,0] = C_adv/float(da_const["nens"])
+   #         else:   
+   #             np.random.seed(random_samples+seed_samples)
+   #             randomized_ens = np.random.choice(np.arange(0,da_const["nens"]), random_samples, replace=False)
+   #             for r in randomized_ens:
+   #                 #print(r)
+   #                 if da_const["fixed_seed"]==True: np.random.seed(r)
+   #                 u_ens      = np.random.normal(m_const["u_ref"],da_const["u_std_ens"])
+   #                 u_ens      = m_const["u_ref"]*error_u/100.+(u_ens-m_const["u_ref"])*error_u_ens/100.
+   #                 #print('u_ens',u_ens)
+   #                 C_adv = C_adv + semi_lagrangian_advection(C[:,0],m_const['dx'],u_ens,da_const['dt'])
+   #                 #C_adv[:,0] = C_adv[:,0] + semi_lagrangian_advection(C[:,0],m_const['dx'],10.,da_const['dt'])
   #             C[:,0] = C_adv/float(random_samples)
+   #     
+   #     for i in range(1,m_const['nx']):
+   #         C[:,i] = np.hstack([C[-1,i-1],C[:-1,i-1]])
+   # 
+   # 
+   # #for n in range(m_const["nx"]): #Not sure if the if loop is much quicker than just calculating the dot product everywhere ;) 
+   # #    if response_s[n] != 0:        CdJdX += C[n,:]*np.dot(dJ[n,:],dx.T)/(da_const['nens']-1)
+   # #  Somewhat quicker way that avoids the loop
+   # CdJdX=np.sum(C*np.dot(dJ,dx.T)/(da_const['nens']-1.),axis=0)
+   #
   # #Project the localized covariance into observation space
+   # CdJdXH = np.dot(CdJdX,H.T)
+   # vr_total = - np.dot(CdJdXH,np.dot(HAHtRinv,dJHdxt).T)
+   # J_fc= fc_response
+   # dJ_fc = J_fc-np.mean(J_fc)
+   # real_reduction=np.var(dJ_fc,ddof=1) - np.var(bf_response,ddof=1)
+
+   #     
+   # 
diff --git a/model_functions.py b/model_functions.py
index 26505af6caddbdb8b22448801883c554ff1a3fec..c6b6ccd9220458a77e6abaa379ecdc38e6a4124b 100644
--- a/model_functions.py
+++ b/model_functions.py
@@ -3,6 +3,34 @@
 
 import numpy as np
 
+def set_model_constants_22(nx=300,dx=100,u=10,u_std=0,dhdt=0,dhdt_std=0,h_init_std = 3000, init_func='gaus',sine_init=[1],truth_seed=0):
+    """
+    Sets all the constants used to describe the model. The 2022 version (_22) is only new in that a truth seed is added so that 
+    truth perturbations can be added systematically, but that hasn't been tested yet anyway.
+
+    """
+    const = {}
+    
+    #grid
+    const["nx"] = nx
+    const["dx"] = dx
+    const["x_grid"]  = np.arange(nx)*dx
+    
+    #properties of truth wave
+    const["h_init_std"]      = h_init_std  # width of the initial gaussian distribution, or number of sine waves  
+    const["sine_init"]       = sine_init  # width of the initial gaussian distribution, or number of sine waves  
+    const["init_func"]       = init_func   # currently a gaussian ('gaus') or a sine wave ('sine') are available
+    const["u_ref"]           = u           # reference u around which ensemble is generated, truth can 
+    const["u_std_truth"]     = u_std       # standard deviation of u for truth 
+    const["dhdt_ref"]        = dhdt        # reference dhdt around which ensemble is generated
+    const["dhdt_std_truth"]  = dhdt_std   # standard deviation of dhdt for truth
+    const["truth_seed"]      = truth_seed   # added to seed used to generate truth deviations
+    
+    #model
+    const["model"] = 'LA'
+
+    return const
+
 def set_model_constants(nx=101,dx=100,u=2.33333333,u_std=0,dhdt=0,dhdt_std=0,h_init_std = 500, init_func='gaus',sine_init=[1]):
     """
     Sets all the constants used to describe the model.
diff --git a/plot_functions.py b/plot_functions.py
index ae06f44ad2271e7e4491cd314af3bcbe0478fd30..a14d7180ace7e761407d3b79453495f33ad9f278 100644
--- a/plot_functions.py
+++ b/plot_functions.py
@@ -6,6 +6,8 @@ import seaborn as sns
 import matplotlib.pyplot as plt
 import numpy as np
 
+from da_functions import *
+
 def ensemble_plotter(states,m_const,da_const,j=0,t_start=0,t_end=0):
     """
     Plots the full forecast/background ensemble with observations, as well as the resulting analysis.
@@ -119,6 +121,67 @@ def quad_plotter(quad_state,m_const,da_const):
     ax[1,0].legend()
     return fig,ax
 
+def quad_plotter_v2(quad_state,m_const,da_const):
+    """
+    Plots the initial background and blind forecast, as well as the analysis and forecast with observations 
+ 
+    Input: 
+    j : experiment number
+    t_start and t_end: time frame to plot
+
+    Returns: 
+    figure and axes
+    """
+    sns.set()
+    sns.set_style('whitegrid')
+
+
+    alpha = np.sqrt(1/da_const['nens'])+0.1
+    
+    fig, ax = plt.subplots(2,2,figsize=(7.5,4),sharex='all',sharey='all')
+  
+    
+    
+    for i in range(da_const["nens"]):
+        ax[0,0].plot(m_const['x_grid']/1000.,quad_state['bg'][:,i],'r',alpha =alpha,zorder=1)
+        ax[0,1].plot(m_const['x_grid']/1000.,quad_state['bf'][:,i],'b',alpha =alpha,zorder=1)
+        ax[1,0].plot(m_const['x_grid']/1000.,quad_state['an'][:,i],'magenta',alpha =alpha,zorder=1)
+        ax[1,1].plot(m_const['x_grid']/1000.,quad_state['fc'][:,i],'c',alpha =alpha,zorder=1)
+    
+
+    ax[0,0].plot(m_const["x_grid"]/1000.,quad_state['tr_bg'],'k',zorder=10,label='truth')
+    ax[1,0].plot(m_const["x_grid"]/1000.,quad_state['tr_bg'],'k',zorder=10,label='truth')
+    #ax[0,1].plot(m_const["x_grid"],quad_stat/1000.e['tr_fc'],'k',zorder=10,label='truth')
+    #ax[1,1].plot(m_const["x_grid"],quad_stat/1000.e['tr_fc'],'k',zorder=10,label='truth')
+    
+    ax[0,0].plot(m_const['x_grid']/1000.,np.mean(quad_state['bg'][:,:],axis=1),'k--',alpha =1,zorder=2,label='ens mean')
+    ax[1,0].plot(m_const['x_grid']/1000.,np.mean(quad_state['an'][:,:],axis=1),'k--',alpha =1,zorder=2,label='ens mean')
+    ax[0,1].plot(m_const['x_grid']/1000.,np.mean(quad_state['bf'][:,:],axis=1),'k--',alpha =1,zorder=2,label='ens mean')
+    ax[1,1].plot(m_const['x_grid']/1000.,np.mean(quad_state['fc'][:,:],axis=1),'k--',alpha =1,zorder=2,label='ens mean')
+
+    ax[0,0].scatter(m_const["x_grid"][da_const["obs_loc"]]/1000.,quad_state['obs'][da_const["obs_loc"]],zorder=3,label='observations',color='k')
+    ax[1,0].scatter(m_const["x_grid"][da_const["obs_loc"]]/1000.,quad_state['obs'][da_const["obs_loc"]],zorder=3,label='observations',color='k')
+    
+    
+    ax[0,0].set_title('background with observations')
+    ax[0,1].set_title('free-forecast')
+    ax[1,0].set_title('analysis with observations')
+    ax[1,1].set_title('forecast')
+    ax[0,1].set_xlim([m_const["x_grid"][0]/1000.,m_const["x_grid"][-1]/1000.])
+    plt.subplots_adjust(wspace=0.03,hspace=0.20)
+    ax[1,1].set_xlabel('x [km]')
+    ax[1,0].set_xlabel('x [km]')
+    ax[0,0].set_ylabel('h [m]')
+    ax[1,0].set_ylabel('h [m]')
+    ax[1,0].legend()
+    
+    # now to add in shading for response domain
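+    # (the hard-coded 10-20 km band corresponds to the middle third of the default 30 km domain)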
+    ylimits = ax[0,0].get_ylim()
+    ax[0,1].fill_between([10,20], ylimits[0], y2=ylimits[1],color='grey',alpha=0.2,zorder=-1)
+    ax[1,1].fill_between([10,20], ylimits[0], y2=ylimits[1],color='grey',alpha=0.2,zorder=-1)
+    ax[1,1].set_ylim(ylimits)
+    
+    return fig,ax
 
 def B_plotter(states,j=0,ncyc=0,matrix='bg'):
     """
@@ -314,3 +377,260 @@ def _wedge_dir(vertices, direction):
     # if the first couple x/y values after the start are the same, it
     #  is the input direction. If not, it is the opposite
     return outcome_key[result]
+
+
+#Little subplot labeling script found online from https://gist.github.com/tacaswell/9643166
+#Put here for no good reason
+#Warning, if you define your colorbar using an additional axes it will give that a label as well, which is pretty funny but also very annoying. 
+
+import string
+from itertools import cycle
+def label_axes_abcd(fig, labels=None, loc=None, **kwargs):
+    """
+    Walks through axes and labels each.
+    kwargs are collected and passed to `annotate`
+    Parameters
+    ----------
+    fig : Figure
+         Figure object to work on
+    labels : iterable or None
+        iterable of strings to use to label the axes.
+        If None, lower case letters are used.
+    
+    loc : len=2 tuple of floats
+        Where to put the label in axes-fraction units
+    """
+    if labels is None:
+        labels = string.ascii_lowercase
+    
+    # re-use labels rather than stop labeling
+    labels = cycle(labels)
+    if loc is None:
+        loc = (.9, .9)
+    for ax, lab in zip(fig.axes, labels):
+        ax.annotate(lab, xy=loc,
+                    xycoords='axes fraction',
+                    **kwargs)
+###########################################################################################
+# 2022 baby, now with sat data
+###########################################################################################
+
+
+def ensemble_plotter_22(states,m_const,da_const,j=0,t_start=0,t_end=0,h_c=None):
+    """
+    Plots the full forecast/background ensemble with observations, as well as the resulting analysis.
+    Plots all timesteps from t_start till t_end. 
+    Initial starting conditions have no background state
+    
+    
+ 
+    Input: 
+    j : experiment number
+    t_start and t_end: time frame to plot
+
+    Returns: 
+    figure and axes
+    """
+    sns.set()
+    sns.set_style('whitegrid')
+    nc = da_const['ncyc']
+    if t_end ==0: t_end = nc
+
+    na = t_end-t_start
+
+    alpha = np.sqrt(1/da_const['nens'])
+    
+    fig, ax = plt.subplots(na,2,figsize=(7.5,na*1.5),sharex='all',sharey='all')
+  
+    #left bg, right analysis
+    
+    
+    #for c in range(nc):
+    for c in range(na):
+        for i in range(da_const["nens"]):
+            ax[c,0].plot(m_const['x_grid'],states[j]['bg'][c+t_start][:,i],'r',alpha =alpha,zorder=1)
+            ax[c,1].plot(m_const['x_grid'],states[j]['an'][c+t_start][:,i],'magenta',alpha =alpha,zorder=1)
+        ax[c,0].plot(m_const['x_grid'],np.mean(states[j]['bg'][c+t_start][:,:],axis=1),'k--',alpha =1,zorder=2,label='ens mean')
+        ax[c,0].plot(m_const["x_grid"],states[j]['truth'][c+t_start],'k',zorder=10,label='truth')
+        
+        ax[c,1].plot(m_const['x_grid'],np.mean(states[j]['an'][c+t_start][:,:],axis=1),'k--',alpha =1,zorder=2,label='ens mean')
+        ax[c,1].plot(m_const["x_grid"],states[j]['truth'][c+t_start],'k',zorder=10,label='truth')
+        
+        if da_const['n_obs_h']>0:
+            ax[c,0].scatter(m_const["x_grid"][da_const["obs_loc"]],states[j]['obs'][c+t_start][da_const["obs_loc"]],zorder=3,label='observations',c='k')
+            ax[c,1].scatter(m_const["x_grid"][da_const["obs_loc"]],states[j]['obs'][c+t_start][da_const["obs_loc"]],zorder=3,label='observations',c='k')
+        if h_c is not None:
+            ax[c,0].hlines(h_c,m_const['x_grid'][0],m_const['x_grid'][-1],color='k',ls=':',label='sat threshold')
+            ax[c,1].hlines(h_c,m_const['x_grid'][0],m_const['x_grid'][-1],color='k',ls=':',label='sat threshold')
+
+        #if da_const['n_obs_sat']>0:
+        #    #sat obs locations will be added as well; I'll add them in with their value for now. Maybe I'll get fancy and use a full circle where cloudy and an empty circle where clear.
+        #    ax[c,0].scatter(m_const["x_grid"][da_const["obs_loc_sat"]],states[j]['obs_sat'][c+t_start][da_const["obs_loc_sat"]],zorder=3,label='sat obs loc',marker='x',c='k')
+        #    ax[c,1].scatter(m_const["x_grid"][da_const["obs_loc_sat"]],states[j]['obs_sat'][c+t_start][da_const["obs_loc_sat"]],zorder=3,label='sat obs loc',marker='x',c='k')
+        ax[c,0].set_ylabel('assim step '+str(c+t_start) + '\n h [m]')
+        
+    
+    ax[0,0].set_title('background ensemble and observations')
+    ax[0,1].set_title('analysis ensemble after update')
+    ax[0,1].set_xlim([m_const["x_grid"][0],m_const["x_grid"][-1]])
+    plt.subplots_adjust(wspace=0.01,hspace=0.01)
+#     ax[0].set_xticklabels([])
+    ax[na-1,1].set_xlabel('x [m]')
+    ax[na-1,0].set_xlabel('x [m]')
+    plt.legend()
+    return fig,ax
+
+
+
+
+def quad_plotter_22(quad_state,m_const,da_const):
+    """
+    Plots the initial background and blind forecast, as well as the analysis and forecast with observations.
+
+    The only difference to previous versions is that it can deal with no obs being present.
+ 
+
+    Returns: 
+    figure and axes
+    """
+    sns.set()
+    sns.set_style('whitegrid')
+
+
+    alpha = np.sqrt(1/da_const['nens'])+0.1
+    
+    fig, ax = plt.subplots(2,2,figsize=(7.5,4),sharex='all',sharey='all')
+  
+    
+    
+    for i in range(da_const["nens"]):
+        ax[0,0].plot(m_const['x_grid']/1000.,quad_state['bg'][:,i],'r',alpha =alpha,zorder=1)
+        ax[0,1].plot(m_const['x_grid']/1000.,quad_state['bf'][:,i],'b',alpha =alpha,zorder=1)
+        ax[1,0].plot(m_const['x_grid']/1000.,quad_state['an'][:,i],'magenta',alpha =alpha,zorder=1)
+        ax[1,1].plot(m_const['x_grid']/1000.,quad_state['fc'][:,i],'c',alpha =alpha,zorder=1)
+    
+
+    ax[0,0].plot(m_const["x_grid"]/1000.,quad_state['tr_bg'],'k',zorder=10,label='truth')
+    ax[1,0].plot(m_const["x_grid"]/1000.,quad_state['tr_bg'],'k',zorder=10,label='truth')
+    #ax[0,1].plot(m_const["x_grid"],quad_stat/1000.e['tr_fc'],'k',zorder=10,label='truth')
+    #ax[1,1].plot(m_const["x_grid"],quad_stat/1000.e['tr_fc'],'k',zorder=10,label='truth')
+    
+    ax[0,0].plot(m_const['x_grid']/1000.,np.mean(quad_state['bg'][:,:],axis=1),'k--',alpha =1,zorder=2,label='ens mean')
+    ax[1,0].plot(m_const['x_grid']/1000.,np.mean(quad_state['an'][:,:],axis=1),'k--',alpha =1,zorder=2,label='ens mean')
+    ax[0,1].plot(m_const['x_grid']/1000.,np.mean(quad_state['bf'][:,:],axis=1),'k--',alpha =1,zorder=2,label='ens mean')
+    ax[1,1].plot(m_const['x_grid']/1000.,np.mean(quad_state['fc'][:,:],axis=1),'k--',alpha =1,zorder=2,label='ens mean')
+
+    if da_const['n_obs_h']:
+        ax[0,0].scatter(m_const["x_grid"][da_const["obs_loc"]]/1000.,quad_state['obs'][da_const["obs_loc"]],zorder=3,label='observations',color='k')
+        ax[1,0].scatter(m_const["x_grid"][da_const["obs_loc"]]/1000.,quad_state['obs'][da_const["obs_loc"]],zorder=3,label='observations',color='k')
+    
+    
+    ax[0,0].set_title('background')
+    ax[0,1].set_title('free-forecast')
+    ax[1,0].set_title('analysis')
+    ax[1,1].set_title('forecast')
+    ax[0,1].set_xlim([m_const["x_grid"][0]/1000.,m_const["x_grid"][-1]/1000.])
+    plt.subplots_adjust(wspace=0.03,hspace=0.20)
+    ax[1,1].set_xlabel('x [km]')
+    ax[1,0].set_xlabel('x [km]')
+    ax[0,0].set_ylabel('h [m]')
+    ax[1,0].set_ylabel('h [m]')
+    ax[1,0].legend()
+    
+    # now to add in shading for response domain
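+    # (the hard-coded 10-20 km band corresponds to the middle third of the default 30 km domain)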
+    ylimits = ax[0,0].get_ylim()
+    ax[0,1].fill_between([10,20], ylimits[0], y2=ylimits[1],color='grey',alpha=0.2,zorder=-1)
+    ax[1,1].fill_between([10,20], ylimits[0], y2=ylimits[1],color='grey',alpha=0.2,zorder=-1)
+    ax[1,1].set_ylim(ylimits)
+    
+    return fig,ax
+
+def plot_ensemble_sat_analysis(bg,an,obs,obs_sat,truth,sat_operator,m_const,da_const,h_c=None):
+    """
+    Plots the background and analysis ensemble, as well as the satellite obs equivalents. Includes observations as well.
+    
+    Is still quite rough around the edges, and will hopefully be improved upon.
+    """
+    fig, ax = plt.subplots(2,2,figsize=(8,6),sharey='col',sharex='all')
+    #ax[0].plot(m_const['x_grid'],x_ol_a,'r')
+    #ax[0].plot(m_const['x_grid'],x_ol_a_loc[:],'b')
+    #ax[0].plot(m_const['x_grid'],states[0]['truth'][t],'k')
+
+    sat_an    = sat_operator(an)
+    sat_bg    = sat_operator(bg)
+    sat_tr_bg = sat_operator(truth)
+    n_obs_h  =len(da_const["obs_loc"])
+    n_obs_sat =len(da_const["obs_loc_sat"])
+     
+    dY_b, y_ol_b = state_to_observation_space(bg,m_const,da_const,sat_operator)
+    y_b = dY_b.T + y_ol_b
+    y_b = y_b.T
+    
+    # for plotting the pixels
+    window = m_const['x_grid'][da_const["obs_loc_sat"][1]]-m_const['x_grid'][da_const["obs_loc_sat"][0]]
+    xpixmin = m_const['x_grid'][da_const["obs_loc_sat"]]-window/2 
+    xpixmax = m_const['x_grid'][da_const["obs_loc_sat"]]+window/2 
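+    #(assumes evenly spaced sat observations: the spacing of the first two sets the pixel width used for all)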
+    
+    for i in range(bg.shape[1]):
+        #ax[0].plot(m_const['x_grid'],x_a[:,i],'r',alpha=0.2)
+        ax[1,0].plot(m_const['x_grid'],an [:,i],'magenta',alpha=0.2)
+        ax[0,0].plot(m_const['x_grid'],bg [:,i],'r',alpha=0.2)
+        
+        # plotting the pixels
+         
+        #ax[1,1].plot(m_const['x_grid'],sat_an [:,i],'magenta',alpha=0.2)
+        #ax[0,1].plot(m_const['x_grid'],sat_bg [:,i],'r',alpha=0.2)
+        ax[1,1].hlines(
+                     sat_an [da_const["obs_loc_sat"],i],
+                     xpixmin,xpixmax,
+                     color='magenta',alpha=0.2,lw=3)
+        ax[0,1].hlines(
+                     sat_bg [da_const["obs_loc_sat"],i],
+                     xpixmin,xpixmax,
+                     color='r',alpha=0.2,lw=3)
+    
+    #ax[0].plot(m_const['x_grid'],x_ol_a,'r')
+    ax[1,0].plot(m_const['x_grid'],truth,'k',linewidth=2)
+    ax[0,0].plot(m_const['x_grid'],truth,'k',linewidth=2)
+    #ax[0,1].hlines(
+    #         sat_tr_bg [da_const["obs_loc_sat"]],
+    #         xpixmin,xpixmax,
+    #         color='k')
+    #ax[1,1].plot(m_const['x_grid'],sat_tr_bg,'k',linewidth=2)
+    #ax[0,1].plot(m_const['x_grid'],sat_tr_bg,'k',linewidth=2)
+
+
+    if n_obs_h>0:
+        ax[1,0].scatter(m_const['x_grid'][da_const['obs_loc']],obs[da_const['obs_loc']],c='k')
+        ax[0,0].scatter(m_const['x_grid'][da_const['obs_loc']],obs[da_const['obs_loc']],c='k')
+    if n_obs_sat>0:
+        ax[1,1].scatter(m_const['x_grid'][da_const['obs_loc_sat']],obs_sat[da_const['obs_loc_sat']],marker='x',s=50,c='k')
+        ax[0,1].scatter(m_const['x_grid'][da_const['obs_loc_sat']],obs_sat[da_const['obs_loc_sat']],marker='x',s=50,c='k')
+
+    ax[0,1].plot(m_const['x_grid'],np.mean(sat_bg,axis=1),'k--',lw=2)
+    #ax[1,1].plot(m_const['x_grid'],np.mean(sat_an,axis=1),'magenta',lw=2)
+    #ax[0,1].plot(m_const['x_grid'],np.mean(sat_bg,axis=1),'r',lw=2)
+    ax[1,1].plot(m_const['x_grid'],np.mean(sat_an,axis=1),'k--',lw=2)
+
+    ax[0,0].plot(m_const['x_grid'],np.mean(bg,axis=1),'k--',lw=2)
+#     ax[0,0].plot(m_const['x_grid'],np.mean(an,axis=1),'magenta',lw=2)
+    ax[1,0].plot(m_const['x_grid'],np.mean(an,axis=1),'k--',lw=2)
+    #ax[1,0].plot(m_const['x_grid'],np.mean(bg,axis=1),'r',lw=2)
+    
+    if h_c is not None:  
+        ax[0,0].hlines(h_c,m_const['x_grid'][0],m_const['x_grid'][-1],color='k',ls=':')
+        ax[1,0].hlines(h_c,m_const['x_grid'][0],m_const['x_grid'][-1],color='k',ls=':')
+
+
+    ax[0,0].set_title('height h')
+    ax[0,1].set_title('reflectance')
+    ax[1,0].set_xlabel('x [m]')
+    ax[1,1].set_xlabel('x [m]')
+    ax[1,1].set_xlim(m_const['x_grid'][0],m_const['x_grid'][-1])
+    #ax[1,0].set_ylabel('Analysis \n  h [m]')
+    #ax[0,1].set_ylabel('Background \n cloud fraction')
+    ax[0,0].set_ylabel('Background')
+    ax[1,0].set_ylabel('Analysis')
+    #plt.subplots_adjust(wspace=0.1,hspace=0.1)
+    return fig, ax
\ No newline at end of file