Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -136,5 +136,8 @@ examples/ex-gwf-zaidel/
examples/freyberg/
examples/sanpedro/
examples/xd_box_chd_ana_working
examples/synthdewater_working
autotest/*_test*/
.figures/
.figures/

.temp/
2 changes: 1 addition & 1 deletion environment.yml
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ dependencies:
- pyemu
- pyshp
- ruff
- codespell
- pip
- pip:
- git+https://github.com/pypest/pyemu@develop
- codespell
25 changes: 19 additions & 6 deletions examples/clear_output_all.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,18 +23,31 @@ def find_files_with_extension(directory, extension):

directory_path = ".."
file_extension = ".ipynb"
exclude_paths = (
".ipynb_checkpoints",
"site-packages",
".temp",
)
notebook_count = 0
try:
files = find_files_with_extension(directory_path, file_extension)
if files:
print(f"Files with extension '{file_extension}' found in '{directory_path}':")
for file in files:
print(f"clearing...{file}")
os.system(
"jupyter nbconvert --ClearOutputPreprocessor.enabled=True "
+ f"--ClearMetadataPreprocessor.enabled=True --inplace {file}"
)
notebook_count += 1
exclude = False
for epath in exclude_paths:
if epath in str(file):
exclude = True
break
if exclude:
print(f"skipping...{file}")
else:
print(f"clearing...{file}")
os.system(
"jupyter nbconvert --ClearOutputPreprocessor.enabled=True "
+ f"--ClearMetadataPreprocessor.enabled=True --inplace {file}"
)
notebook_count += 1
else:
print(
f"No files with extension '{file_extension}' found in '{directory_path}'."
Expand Down
2 changes: 1 addition & 1 deletion examples/sanpedro_demo.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,7 @@
" fig, ax = plt.subplots(1, 1, figsize=(6, 6))\n",
" ax.set_aspect(\"equal\")\n",
"\n",
" cb = ax.pcolormesh(X, Y, arr, cmap=\"plasma\", alpha=0.5)\n",
" cb = ax.pcolormesh(X, Y, arr, cmap=\"viridis\")\n",
" plt.colorbar(cb, ax=ax, label=units)\n",
" ax.pcolormesh(X, Y, ib[k, :, :], cmap=ib_cmap, alpha=0.5)\n",
" ax.pcolormesh(X, Y, sfr_arr)\n",
Expand Down
2 changes: 1 addition & 1 deletion examples/xd_box_chd_ana/xdbox.ims
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ END nonlinear

BEGIN linear
INNER_MAXIMUM 1000
INNER_DVCLOSE 1.00000000E-08
INNER_DVCLOSE 1.00000000E-09
LINEAR_ACCELERATION bicgstab
END linear

4 changes: 2 additions & 2 deletions examples/xdbox_demo.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -554,9 +554,9 @@
"y_center = [y[i] + delrow / 2.0 for i in range(len(y))]\n",
"y = y[::-1]\n",
"y_center = y_center[::-1]\n",
"vmin, vmax = lamb_ana.min(), lamb_ana.max()\n",
"vmin, vmax = -lamb_ana.max(), -lamb_ana.min()\n",
"contour_intervals = np.arange(vmin, vmax, 0.003)\n",
"plot_colorbar_contour(x, y, lamb_ana, lam_adj[0], vmin, vmax)"
"plot_colorbar_contour(x, y, -lamb_ana, -lam_adj[0], vmin, vmax)"
]
},
{
Expand Down
44 changes: 39 additions & 5 deletions mf6adj/adj.py
Original file line number Diff line number Diff line change
Expand Up @@ -613,7 +613,8 @@ def write_group_to_hdf(hdf, group_name: str, data_dict: dict, attr_dict: dict =
)
else:
raise Exception(
f"unrecognized data_dict entry: {tag},type:{type(item)}"
"Mf6Adj::write_group_to_hdf: unrecognized data_dict entry: "
+ f"{tag}, type: {type(item)}"
)

def _open_hdf(self, tag: str):
Expand Down Expand Up @@ -703,6 +704,8 @@ def _add_gwf_info_to_hdf(self, hdf):
data_dict["iconvert"] = iconvert
storage = PerfMeas.get_ptr_from_gwf(gwf_name, "STO", "SS", gwf)
data_dict["storage"] = storage
sy = PerfMeas.get_ptr_from_gwf(gwf_name, "STO", "SY", gwf)
data_dict["sy"] = sy
nodeuser = PerfMeas.get_ptr_from_gwf(gwf_name, dis_pak, "NODEUSER", gwf) - 1
data_dict["nodeuser"] = nodeuser
nodereduced = (
Expand Down Expand Up @@ -785,25 +788,44 @@ def dresdss_h(
return result

@staticmethod
def drhsdh(gwf_name: str, gwf, dt: float):
def drhsdh(
gwf_name: str,
gwf,
dt: float,
sat_old: np.ndarray,
):
"""partial of the RHS WRT H

Parameters
----------
gwf_name (str) : name of the GWF model
gwf (MODFLOW6 API) : the API instance
dt (float) : length of the current solution step in model time
sat_old (ndarray) : saturation from the last solve

Returns
-------
drhsdh (ndarray) : drhsdh

"""
area = PerfMeas.get_ptr_from_gwf(gwf_name, "DIS", "AREA", gwf)

# specific storage
top = PerfMeas.get_ptr_from_gwf(gwf_name, "DIS", "TOP", gwf)
bot = PerfMeas.get_ptr_from_gwf(gwf_name, "DIS", "BOT", gwf)
area = PerfMeas.get_ptr_from_gwf(gwf_name, "DIS", "AREA", gwf)
storage = PerfMeas.get_ptr_from_gwf(gwf_name, "STO", "SS", gwf)
drhsdh = -1.0 * storage * area * (top - bot) / dt

# specific yield
iconvert = PerfMeas.get_ptr_from_gwf(gwf_name, "STO", "ICONVERT", gwf)
sy = PerfMeas.get_ptr_from_gwf(gwf_name, "STO", "SY", gwf)
sat_old_mod = sat_old.copy()
sat_old_mod[iconvert == 0] = 1.0
sy_mod = sy.copy()
sy_mod[sat_old_mod == 1.0] = 0.0

# calculate drhsdh
drhsdh = -1.0 * area * (storage * (top - bot) + sy_mod) / dt

return drhsdh

def solve_gwf(
Expand Down Expand Up @@ -1052,7 +1074,7 @@ def solve_gwf(
self._gwf_name, self._gwf, head, head_old, dt1, sat, sat_old
)
data_dict["dresdss_h"] = dresdss_h
drhsdh = Mf6Adj.drhsdh(self._gwf_name, self._gwf, dt1)
drhsdh = Mf6Adj.drhsdh(self._gwf_name, self._gwf, dt1, sat_old)
data_dict["drhsdh"] = drhsdh
else:
data_dict["drhsdh"] = np.zeros_like(sat_old)
Expand Down Expand Up @@ -1207,6 +1229,8 @@ def solve_adjoint(
linear_solver_kwargs: dict = {},
use_precon: bool = True,
precon_kwargs: dict = {},
singular_test: bool = False,
tikhonov: float = 0.0,
):
"""Solve for the adjoint state, one performance measure at at time

Expand All @@ -1224,6 +1248,14 @@ def solve_adjoint(
linear solver.
precon_kwargs (dict): dictionary of keyword args to pass to the ilu
preconditioner. Default is {}
        singular_test (bool): flag to test for a singular matrix and, if the matrix
            is determined to be singular, apply Tikhonov regularization.
            Default is False since there is a non-negligible cost to test whether a
            matrix is singular.
tikhonov (float) : Tikhonov regularization value. This can be used to stabilize
the adjoint solve but introduces an approximation and should
be used cautiously. Small values (for example, 1e-6) have been found to
be effective. Default is 0.0

Returns
-------
Expand All @@ -1246,6 +1278,8 @@ def solve_adjoint(
linear_solver_kwargs=linear_solver_kwargs,
use_precon=use_precon,
precon_kwargs=precon_kwargs,
singular_test=singular_test,
tikhonov=tikhonov,
)
dfs[pm.name] = df
return dfs
Expand Down
Loading