From 6a36b4d4b7228b0d73add7efb677af8524d54a17 Mon Sep 17 00:00:00 2001
From: asteppke
Date: Thu, 13 Feb 2025 17:00:20 +0100
Subject: [PATCH 1/2] updated regularization for scipy >=1.12

---
 .../__chartrand_tvregdiff__.py               | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/pynumdiff/total_variation_regularization/__chartrand_tvregdiff__.py b/pynumdiff/total_variation_regularization/__chartrand_tvregdiff__.py
index 8b35909..2e226a6 100644
--- a/pynumdiff/total_variation_regularization/__chartrand_tvregdiff__.py
+++ b/pynumdiff/total_variation_regularization/__chartrand_tvregdiff__.py
@@ -228,7 +228,7 @@ def AT(w): return (sum(w) * np.ones(n + 1) -
             g = AT(A(u)) + ATb + alph * L * u
             # Prepare to solve linear equation.
-            tol = 1e-4
+            rtol = 1e-4
             # Simple preconditioner.
             P = alph * sparse.spdiags(L.diagonal() + 1, 0, n + 1, n + 1)
@@ -237,8 +237,8 @@ def linop(v): return (alph * L * v + AT(A(v)))
             if diagflag:
                 [s, info_i] = sparse.linalg.cg(
-                    linop, g, x0=None, tol=tol, maxiter=maxit, callback=None,
-                    M=P, atol='legacy')
+                    linop, g, x0=None, rtol=rtol, maxiter=maxit, callback=None,
+                    M=P, atol=0)
                 #print('iteration {0:4d}: relative change = {1:.3e}, '
                 #      'gradient norm = {2:.3e}\n'.format(ii,
                 #                                         np.linalg.norm(
@@ -251,8 +251,8 @@ def linop(v): return (alph * L * v + AT(A(v)))
                 #     print("WARNING - illegal input or breakdown")
             else:
                 [s, info_i] = sparse.linalg.cg(
-                    linop, g, x0=None, tol=tol, maxiter=maxit, callback=None,
-                    M=P, atol='legacy')
+                    linop, g, x0=None, rtol=rtol, maxiter=maxit, callback=None,
+                    M=P, atol=0)
             # Update solution.
             u = u - s
             # Display plot.
@@ -300,7 +300,7 @@ def AT(w): return (sum(w) * np.ones(len(w)) -
             # droptol = 1.0e-2
             R = sparse.dia_matrix(np.linalg.cholesky(B.todense()))
             # Prepare to solve linear equation.
-            tol = 1.0e-4
+            rtol = 1.0e-4

             def linop(v): return (alph * L * v + AT(A(v)))
             linop = splin.LinearOperator((n, n), linop)
@@ -308,8 +308,8 @@ def linop(v): return (alph * L * v + AT(A(v)))
             print(maxit)
             if diagflag:
                 [s, info_i] = sparse.linalg.cg(
-                    linop, -g, x0=None, tol=tol, maxiter=maxit, callback=None,
-                    M=np.dot(R.transpose(), R), atol='legacy')
+                    linop, -g, x0=None, rtol=rtol, maxiter=maxit, callback=None,
+                    M=np.dot(R.transpose(), R), atol=0)
                 print('iteration {0:4d}: relative change = {1:.3e}, '
                       'gradient norm = {2:.3e}\n'.format(ii,
                                                          np.linalg.norm(s[0]) /
@@ -322,8 +322,8 @@ def linop(v): return (alph * L * v + AT(A(v)))
             else:
                 [s, info_i] = sparse.linalg.cg(
-                    linop, -g, x0=None, tol=tol, maxiter=maxit, callback=None,
-                    M=np.dot(R.transpose(), R), atol='legacy')
+                    linop, -g, x0=None, rtol=rtol, maxiter=maxit, callback=None,
+                    M=np.dot(R.transpose(), R), atol=0)
             # Update current solution
             u = u + s
             # Display plot.
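
For reference (a sketch, not part of the patch itself): SciPy 1.12 renamed the tol keyword of scipy.sparse.linalg.cg to rtol and dropped the atol='legacy' mode; the solver now stops when norm(residual) <= max(rtol*norm(b), atol), so passing atol=0 keeps the tolerance purely relative, matching the intent of the calls above. A minimal, self-contained example under those assumptions, with a hypothetical SPD tridiagonal system standing in for the tvregdiff operator:

    # Sketch of the SciPy >= 1.12 cg keywords (hypothetical example, not pynumdiff code).
    import numpy as np
    from scipy import sparse
    from scipy.sparse.linalg import LinearOperator, cg

    n = 100
    A = sparse.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(n, n), format='csr')  # SPD 1-D Laplacian
    b = np.ones(n)

    def matvec(v):
        # Matrix-vector product wrapped as a LinearOperator, mirroring the linop pattern in the patch.
        return A @ v

    linop = LinearOperator((n, n), matvec=matvec)

    # 'tol' is now 'rtol'; atol=0 keeps the stopping rule purely relative:
    # norm(residual) <= max(rtol * norm(b), atol)
    x, info = cg(linop, b, x0=None, rtol=1e-4, atol=0, maxiter=200)
    print(info)  # 0 means the rtol-based stopping criterion was reached

The same keyword swap carries over unchanged to the preconditioned calls in the patch (M=P and M=np.dot(R.transpose(), R)).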
From c2cbdd0ff82c099af91a0f1cd71030dafea847d1 Mon Sep 17 00:00:00 2001
From: asteppke
Date: Thu, 13 Feb 2025 17:10:00 +0100
Subject: [PATCH 2/2] fixed cumtrapz deprecation

---
 pynumdiff/linear_model/_linear_model.py | 2 +-
 pynumdiff/utils/utility.py              | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/pynumdiff/linear_model/_linear_model.py b/pynumdiff/linear_model/_linear_model.py
index 39e728e..1b35822 100644
--- a/pynumdiff/linear_model/_linear_model.py
+++ b/pynumdiff/linear_model/_linear_model.py
@@ -414,7 +414,7 @@ def __integrate_dxdt_hat_matrix__(dxdt_hat, dt):
     #assert isinstance(dxdt_hat, np.matrix)
     if len(dxdt_hat.shape) == 1:
         dxdt_hat = np.reshape(dxdt_hat, [1, len(dxdt_hat)])
-    x = np.array(scipy.integrate.cumtrapz(dxdt_hat, axis=1))
+    x = np.array(scipy.integrate.cumulative_trapezoid(dxdt_hat, axis=1))
     first_value = x[:, 0:1] - np.mean(dxdt_hat[:, 0:1], axis=1).reshape(dxdt_hat.shape[0], 1)
     x = np.hstack((first_value, x))*dt
     return x
diff --git a/pynumdiff/utils/utility.py b/pynumdiff/utils/utility.py
index bbf9249..c561e71 100644
--- a/pynumdiff/utils/utility.py
+++ b/pynumdiff/utils/utility.py
@@ -213,7 +213,7 @@ def finite_difference(x, dt):
 # Trapazoidal integration, with interpolated final point so that the lengths match.
 def integrate_dxdt_hat(dxdt_hat, dt):
     """
-    Wrapper for scipy.integrate.cumtrapz to integrate dxdt_hat that ensures the integral has the same length
+    Wrapper for scipy.integrate.cumulative_trapezoid to integrate dxdt_hat that ensures the integral has the same length

     :param dxdt_hat: estimate derivative of timeseries
     :type dxdt_hat: np.array
@@ -224,7 +224,7 @@ def integrate_dxdt_hat(dxdt_hat, dt):
     :return: integral of dxdt_hat
     :rtype: np.array
     """
-    x = scipy.integrate.cumtrapz(dxdt_hat)
+    x = scipy.integrate.cumulative_trapezoid(dxdt_hat)
     first_value = x[0] - np.mean(dxdt_hat[0:1])
     x = np.hstack((first_value, x))*dt
     return x
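
For reference (a sketch, not part of the patch itself): scipy.integrate.cumtrapz was deprecated and then removed in recent SciPy releases, and cumulative_trapezoid (available since SciPy 1.6) is its drop-in replacement with the same behaviour, so the wrappers above only need the rename and keep their own length-matching and dt scaling logic. A short example under those assumptions:

    # Sketch of the cumtrapz -> cumulative_trapezoid rename (hypothetical data, not pynumdiff code).
    import numpy as np
    from scipy.integrate import cumulative_trapezoid

    dt = 0.01
    t = np.arange(0.0, 1.0, dt)
    dxdt_hat = np.cos(t)  # toy derivative estimate

    x = cumulative_trapezoid(dxdt_hat) * dt                    # len(dxdt_hat) - 1 points, as cumtrapz returned
    x_full = cumulative_trapezoid(dxdt_hat, dx=dt, initial=0)  # prepend 0 to keep the input length
    print(x.shape, x_full.shape)  # (99,) (100,)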