Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • griewankp99/linear-advection-da-toymodel
1 result
Show changes
Commits on Source (2)
This diff is collapsed.
......@@ -2821,3 +2821,167 @@ def vr_individual_loc_22(background,truth,m_const,da_const,sat_operator,response
#
#
########################################################################################################################
# 2023
########################################################################################################################
def vr_reloaded_22_locsens(background,truth,m_const,da_const,sat_operator,
                           func_J=sum_mid_tri,
                           sens_loc_flag=0,sens_loc_length = 2000,sens_loc_adv_error=100,
                           reduc = 1,reg_flag=1,
                           quad_state = None,dJdx_inv=None,alpha=0.01,mismatch_threshold=0.1,
                           iterative_flag=0,explicit_sens_flag = 1,exp_num=0,obs_seed=0,model_seed=0):
    """
    Version of vr_reloaded_22 that includes the possibility to apply localization to the sensitivity.

    Takes only the background and truth, then calculates the quad (analysis, background,
    forecast, blind forecast), finally calculates the response function and the variance
    reduction. The quad can be supplied to save time, which makes sense when comparing
    different methods for the same experiment. Also returns the dJ values of all 4
    ensembles (analysis, background, forecast, blind forecast).

    Parameters
    ----------
    background, truth : model state input for the quad computation.
    m_const, da_const : model / data-assimilation constant dictionaries.
    sat_operator      : observation (satellite) operator passed through to the quad step.
    func_J            : response function applied per ensemble member (default sum_mid_tri).
    sens_loc_flag     : 1 -> apply localization to the sensitivity.
    sens_loc_length   : localization length scale used for the sensitivity localization matrix.
    sens_loc_adv_error: advection error in percent applied when advecting the localization matrix.
    reduc             : reduced model grid spacing (NOT implemented, kept for interface parity).
    reg_flag          : 1 -> use L2 regularization when inverting for the sensitivity.
    quad_state        : precomputed quad to recycle; computed here when None.
    dJdx_inv          : optionally supplied sensitivity; used instead of computing it.
    alpha, mismatch_threshold : L2-regularization parameters.
    iterative_flag, explicit_sens_flag, exp_num : mode switches (only the explicit,
                        all-at-once path is implemented so far).
    obs_seed, model_seed : random seeds forwarded to the quad computation.

    Returns
    -------
    vr_total, vr_individual, real_reduction, J_dict, dJdx_inv, quad_state, dx

    Notes
    -----
    - reg_flag added to use L2 regularization.
    - Not implemented: state model reduction.
    - Speed-up options: recycle the quad and/or the sensitivity; don't use LETKF.
    - First getting the explicit all-at-once version working; still to be implemented:
      iterative vs all-at-once, explicit vs implicit, reduced model spacing by
      subsampling the grid (reduc is the spacing, e.g. reduc=3 means every third grid
      point is used; should find the nearest reduced model grid point for each observation).
    """
    ###########################################################################
    # First we calculate the quad (analysis, forecast, blind forecast).
    # A precomputed quad_state can be supplied to quickly use different response functions.
    ###########################################################################
    if quad_state is None:
        quad_state = single_step_analysis_forecast_22(background,truth,da_const,m_const,sat_operator,obs_seed=obs_seed,model_seed=model_seed)

    ###########################################################################
    # Next we need the response functions.
    # We only really need the forecast and blind forecast, but for fun we calculate all for now.
    ###########################################################################
    nens = da_const["nens"]
    bf_response = np.zeros(nens)
    fc_response = np.zeros(nens)
    an_response = np.zeros(nens)
    bg_response = np.zeros(nens)
    for n in range(nens):
        bf_response[n] = func_J(quad_state["bf"][:,n])
        fc_response[n] = func_J(quad_state["fc"][:,n])
        an_response[n] = func_J(quad_state["an"][:,n])
        bg_response[n] = func_J(quad_state["bg"][:,n])
    J_dict = {}
    J_dict['bf'] = bf_response
    J_dict['bg'] = bg_response
    J_dict['an'] = an_response
    J_dict['fc'] = fc_response
    J_dict['tr_bg'] = func_J(quad_state['tr_bg'])

    # Background ensemble deviations and covariance matrix A.
    x = quad_state['bg'][:,:]
    dx = x.T-np.mean(x,axis=1)
    dx = dx.T
    A = np.dot(dx,dx.T)/(dx.shape[1]-1)

    # Response deviations of the blind forecast (dJ_orig kept for the real-reduction diagnostic).
    J = bf_response
    dJ = J-np.mean(J)
    dJ_orig = J-np.mean(J)

    if sens_loc_flag==1:
        # Per-member response deviations in state space, zeroed outside the response region.
        # NOTE(review): the 0:100 / 200:300 indices look like a hard-coded mask for a
        # 300-point domain where the response lives in gridpoints 100-199 — confirm.
        X_J = quad_state['bf'][:,:]
        dX_J = X_J.T - np.mean(X_J,axis=1)
        dX_J = dX_J.T
        dji = dX_J*1
        dji[0:100,:] = 0.
        dji[200:300,:] = 0.

    ###############################################################################################
    # Sensitivity
    ###############################################################################################
    # Covariance between response function deviations dJ and state ensemble deviations dx.
    cov_dJdx_vec = np.dot(dJ,dx.T)/(dx.shape[1]-1)
    if explicit_sens_flag==1:
        # If a sensitivity is provided it is used instead of calculating it.
        if type(dJdx_inv) == np.ndarray:
            # Testing supplied sensitivity: relative error of A * dJdx against the covariance vector.
            rel_error_sens = np.sum(np.abs(A.dot(dJdx_inv)-cov_dJdx_vec))/np.sum(np.abs(cov_dJdx_vec))
            if rel_error_sens>0.05:
                print('using supplied sensitivity has a relative error of:',rel_error_sens)
        else:
            # Computing the sensitivity; highly recommend using the regularized version.
            if reg_flag == 1:
                dJdx_inv = L2_regularized_inversion(A,cov_dJdx_vec,alpha=alpha,mismatch_threshold=mismatch_threshold)
            else:
                A_inv = np.linalg.pinv(A)
                dJdx_inv = np.dot(A_inv,cov_dJdx_vec)
        if sens_loc_flag==1:
            # Localized sensitivity: build a wide localization matrix, advect it with the
            # (perturbed) reference wind, and solve the localized regression problem.
            # NOTE(review): this overrides a supplied dJdx_inv when sens_loc_flag==1 —
            # confirm that is the intended precedence.
            da_const_wide = da_const.copy()
            da_const_wide['loc_length'] = sens_loc_length
            C_sens = loc_matrix(da_const_wide,m_const)
            C_adv = C_sens*1.
            for nn in range(m_const['nx']):
                C_adv[:,nn] = semi_lagrangian_advection(C_sens[:,nn],m_const['dx'],+m_const['u_ref']*sens_loc_adv_error/100. ,da_const['dt'])
            sum_loc_cov_adv_djidX = np.sum(C_adv*np.dot(dji,dx.T),axis=0)/(da_const['nens']-1)
            dJdx_inv = L2_regularized_inversion(C_sens*A,sum_loc_cov_adv_djidX,alpha=alpha)

    estimated_J = bf_response + 0.
    # Initialize both variance-reduction outputs so the return statement is always valid
    # (previously vr_total was only assigned on the explicit path -> NameError otherwise).
    vr_individual = 0.
    vr_total = 0.
    if explicit_sens_flag ==1:
        # Tanja's approach of calculating the square root K following the Kalman gain,
        # formula (10) of Whitaker and Hamill 2002.
        sqK = square_root_Kalman_gain_observation_deviations(x,m_const,da_const,sat_operator)
        bg_obs_deviations,bg_obs_ol = state_to_observation_space(x,m_const,da_const,sat_operator)
        dx_prime = dx - np.dot(sqK,bg_obs_deviations)
        # Estimated J calculated by updating the original dJ.
        estimated_J = estimated_J - np.dot(dJdx_inv,np.dot(sqK,bg_obs_deviations))
        # Using the cheaper variance calculation instead of going to A-B.
        new_J = np.dot(dJdx_inv.T,dx_prime)
        vr_total = np.var(new_J,ddof=1)-np.var(np.dot(dJdx_inv.T,dx),ddof=1)
        dx = dx_prime

    J_dict['es'] = estimated_J
    # Real reduction: variance change between forecast and blind-forecast response deviations.
    dJ_fc = fc_response - np.mean(fc_response)
    real_reduction = np.var(dJ_fc,ddof=1) - np.var(dJ_orig,ddof=1)
    return vr_total,vr_individual,real_reduction,J_dict,dJdx_inv,quad_state,dx
\ No newline at end of file
File added
File added
File added
File added
File added