diff --git a/pynumdiff/tests/test_optimize.py b/pynumdiff/tests/test_optimize.py
index 633153d..4bcc374 100644
--- a/pynumdiff/tests/test_optimize.py
+++ b/pynumdiff/tests/test_optimize.py
@@ -80,7 +80,7 @@ def test_butterdiff():
     np.testing.assert_array_less( np.abs(params_1[0] - 9), 1.001, err_msg=get_err_msg(params_1, [9, 0.157]))
     np.testing.assert_array_less( np.abs(params_1[1] - 0.157), 0.01, err_msg=get_err_msg(params_1, [9, 0.157]))
     #np.testing.assert_almost_equal(params_1, [9, 0.157], decimal=3, err_msg=get_err_msg(params_1, [9, 0.157]))
-    np.testing.assert_almost_equal(params_2, [2, 0.99], decimal=3, err_msg=get_err_msg(params_2, [2, 0.99]))
+    np.testing.assert_almost_equal(params_2, [3, 0.99], decimal=3, err_msg=get_err_msg(params_2, [3, 0.99]))

 def test_splinediff():
     params_1, val_1 = splinediff(x, dt, params=None, options={'iterate': True},
diff --git a/pynumdiff/tests/test_total_variation_regularization.py b/pynumdiff/tests/test_total_variation_regularization.py
index 61f4188..3a94f0c 100644
--- a/pynumdiff/tests/test_total_variation_regularization.py
+++ b/pynumdiff/tests/test_total_variation_regularization.py
@@ -68,7 +68,7 @@ def test_smooth_acceleration():
     params = [5, 30]
     x_hat, dxdt_hat = smooth_acceleration(x, dt, params, options={'solver': 'CVXOPT'})
-    x_smooth = np.array([4.16480747, 5.52913444, 6.77037146, 7.87267273, 8.79483088,
+    x_smooth = np.array([4.2, 5.52913444, 6.77037146, 7.87267273, 8.79483088,
                          9.5044844, 9.97926076, 10.20730827, 10.18728338, 9.92792114,
                          9.44728533, 8.77174094, 7.93472066, 6.97538656, 5.93725369])
     dxdt = np.array([136.43269721, 129.9388182, 118.30858578, 102.15166804,
diff --git a/pynumdiff/tests/test_utils.py b/pynumdiff/tests/test_utils.py
index c5ad026..882570d 100644
--- a/pynumdiff/tests/test_utils.py
+++ b/pynumdiff/tests/test_utils.py
@@ -7,8 +7,27 @@
 from pynumdiff.utils import utility, simulate, evaluate

-def test_utility():
-    return# TODO. There are a lot of basic integration functionalities, etc. that deserve tests.
+def test_integrate_dxdt_hat():
+    dt = 0.1
+    for dxdt, expected in [(np.ones(10), np.arange(0, 1, 0.1)), # constant derivative
+                           (np.linspace(0, 1, 11), [0, 0.005, 0.02, 0.045, 0.08, 0.125, 0.18, 0.245, 0.32, 0.405, 0.5]), # linear derivative
+                           (np.array([1.0]), [0])]: # edge case of just one entry
+        x_hat = utility.integrate_dxdt_hat(dxdt, dt)
+        np.testing.assert_allclose(x_hat, expected)
+        assert len(x_hat) == len(dxdt)
+
+def test_estimate_initial_condition():
+    for x, x_hat, c in [([1.0, 2.0, 3.0, 4.0, 5.0], [0.0, 1.0, 2.0, 3.0, 4.0], 1), # perfect alignment case, x_hat shifted by 1
+                        (np.ones(5)*10, np.ones(5)*5, 5),
+                        ([0], [1], -1)]:
+        x0 = utility.estimate_initial_condition(x, x_hat)
+        np.testing.assert_allclose(x0, float(c), rtol=1e-3)
+
+    np.random.seed(42) # Noisy case. Seed for reproducibility
+    x0 = utility.estimate_initial_condition([1.0, 2.0, 3.0, 4.0, 5.0],
+                                            np.array([0.0, 1.0, 2.0, 3.0, 4.0]) + np.random.normal(0, 0.1, 5))
+    assert 0.9 < x0 < 1.1 # The result should be close to 1.0, but not exact due to noise
+

 def test_simulate():
     return
diff --git a/pynumdiff/utils/utility.py b/pynumdiff/utils/utility.py
index e43c260..115f308 100644
--- a/pynumdiff/utils/utility.py
+++ b/pynumdiff/utils/utility.py
@@ -135,43 +135,32 @@ def integrate_dxdt_hat(dxdt_hat, dt):

     :return: **x_hat** (np.array[float]) -- integral of dxdt_hat
     """
-    x = scipy.integrate.cumulative_trapezoid(dxdt_hat)
-    first_value = x[0] - dxdt_hat[0]
-    return np.hstack((first_value, x))*dt
+    return np.hstack((0, scipy.integrate.cumulative_trapezoid(dxdt_hat)))*dt


 # Optimization routine to estimate the integration constant.
 def estimate_initial_condition(x, x_hat):
-    """
-    Integration leaves an unknown integration constant. This function finds a best fit integration constant given x, and x_hat (the integral of dxdt_hat)
-
-    :param x: timeseries of measurements
-    :type x: np.array
+    """Integration leaves an unknown integration constant. This function finds a best-fit integration constant given x and
+    x_hat (the integral of dxdt_hat) by optimizing :math:`\\min_c ||x - (\\hat{x} + c)||_2`.

-    :param x_hat: smoothed estiamte of x, for the purpose of this function this should have been determined by integrate_dxdt_hat
-    :type x_hat: np.array
+    :param np.array[float] x: timeseries of measurements
+    :param np.array[float] x_hat: smoothed estimate of x; for the purpose of this function this should have been determined
+        by integrate_dxdt_hat

-    :return: integration constant (i.e. initial condition) that best aligns x_hat with x
-    :rtype: float
+    :return: **integration constant** (float) -- initial condition that best aligns x_hat with x
     """
-    def f(x0, *args):
-        x, x_hat = args[0]
-        error = np.linalg.norm(x - (x_hat+x0))
-        return error
-    result = scipy.optimize.minimize(f, [0], args=[x, x_hat], method='SLSQP')
-    return result.x
+    return scipy.optimize.minimize(lambda x0, x, x_hat: np.linalg.norm(x - (x_hat + x0)), # objective, minimized over its 1st argument
+                                   0, args=(x, x_hat), method='SLSQP').x[0] # result.x is a vector, even if the initial guess is a scalar


 # kernels
 def _mean_kernel(window_size):
-    """A uniform boxcar of total integral 1
-    """
+    """A uniform boxcar of total integral 1"""
     return np.ones(window_size)/window_size


 def _gaussian_kernel(window_size):
-    """A truncated gaussian
-    """
+    """A truncated gaussian"""
     sigma = window_size / 6.
     t = np.linspace(-2.7*sigma, 2.7*sigma, window_size)
     ker = 1/np.sqrt(2*np.pi*sigma**2) * np.exp(-(t**2)/(2*sigma**2)) # gaussian function itself
@@ -179,8 +168,7 @@


 def _friedrichs_kernel(window_size):
-    """A bump function
-    """
+    """A bump function"""
     x = np.linspace(-0.999, 0.999, window_size)
     ker = np.exp(-1/(1-x**2))
     return ker / np.sum(ker)
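
Reviewer note (not part of the patch): a minimal sketch of how the two rewritten utilities compose once this diff is applied. The signal, offset, and tolerance below are illustrative assumptions, not values from the test suite.

    import numpy as np
    from pynumdiff.utils import utility

    dt = 0.01
    t = np.arange(0, 1, dt)
    x = np.sin(t) + 2.0   # "measured" signal with a nonzero initial condition (assumed example)
    dxdt_hat = np.cos(t)  # its exact derivative, standing in for a smoothed estimate

    # integrate_dxdt_hat now pins the integral to start at 0 ...
    x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
    # ... and estimate_initial_condition recovers the constant lost to integration (~2.0 here)
    x0 = utility.estimate_initial_condition(x, x_hat)
    np.testing.assert_allclose(x_hat + x0, x, atol=1e-2)  # equal up to trapezoid-rule error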