From 00ed2205a4f61f823c8aeedf349302c7259f6858 Mon Sep 17 00:00:00 2001 From: nisar Date: Tue, 18 Feb 2025 19:33:10 -0500 Subject: [PATCH 01/48] Modified fastccd.c/h/module.c to remove gain setting --- src/fastccd.c | 15 +++++++++++++-- src/fastccd.h | 17 ++++++++++------- src/fastccdmodule.c | 19 +++++++++++++------ 3 files changed, 36 insertions(+), 15 deletions(-) diff --git a/src/fastccd.c b/src/fastccd.c index 0099b9f..27311c6 100644 --- a/src/fastccd.c +++ b/src/fastccd.c @@ -44,8 +44,11 @@ // Correct fast ccd images by looping over all images correcting for background +// Nisar +//int correct_fccd_images(uint16_t *in, data_t *out, data_t *bg, data_t *flat, +// int ndims, index_t *dims, data_t* gain){ int correct_fccd_images(uint16_t *in, data_t *out, data_t *bg, data_t *flat, - int ndims, index_t *dims, data_t* gain){ + int ndims, index_t *dims){ index_t nimages,k; int n; @@ -61,7 +64,9 @@ int correct_fccd_images(uint16_t *in, data_t *out, data_t *bg, data_t *flat, index_t imsize = dims[ndims-1] * dims[ndims-2]; -#pragma omp parallel for private(k) shared(in, out, bg, imsize, gain, flat) schedule(static,imsize) +// Nisar +//#pragma omp parallel for private(k) shared(in, out, bg, imsize, gain, flat) schedule(static,imsize) +#pragma omp parallel for private(k) shared(in, out, bg, imsize, flat) schedule(static,imsize) for(k=0;k Date: Tue, 18 Feb 2025 19:52:17 -0500 Subject: [PATCH 02/48] small error fixed --- src/fastccd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/fastccd.c b/src/fastccd.c index 27311c6..4f11aee 100644 --- a/src/fastccd.c +++ b/src/fastccd.c @@ -89,7 +89,7 @@ int correct_fccd_images(uint16_t *in, data_t *out, data_t *bg, data_t *flat, } */ if(in[k]){ - out[k] = *flatp * ((data_t)(in[k] & PIXEL_MASK) - *bgp); + out[k] = *flatp * ((data_t)(in[k]) - *bgp); } } From 24eed3c633682f37184fa874fb8067dbf1736fee Mon Sep 17 00:00:00 2001 From: nisar Date: Thu, 20 Feb 2025 20:52:30 -0500 Subject: [PATCH 03/48] modified utils.py 
settings.py and fastccd/images.py to remove gain setting --- csxtools/fastccd/images.py | 9 ++--- csxtools/settings.py | 2 +- csxtools/utils.py | 69 ++++++++++---------------------------- 3 files changed, 21 insertions(+), 59 deletions(-) diff --git a/csxtools/fastccd/images.py b/csxtools/fastccd/images.py index 59296b6..eeb718d 100644 --- a/csxtools/fastccd/images.py +++ b/csxtools/fastccd/images.py @@ -6,7 +6,7 @@ logger = logging.getLogger(__name__) -def correct_images(images, dark=None, flat=None, gain=(1, 4, 8)): +def correct_images(images, dark=None, flat=None): """Subtract backgrond and gain correct images This routine subtrtacts the backgrond and corrects the images @@ -24,9 +24,6 @@ def correct_images(images, dark=None, flat=None, gain=(1, 4, 8)): flat : array_like, optional Input array for the flatfield correction. This should be of shape (y, x) - gain : tuple, optional - These are the gain multiplication factors for the three different - gain settings Returns ------- @@ -41,7 +38,6 @@ def correct_images(images, dark=None, flat=None, gain=(1, 4, 8)): if dark is None: dark = np.zeros(images.shape[-2:], dtype=np.float32) - dark = np.array((dark, dark, dark)) logger.info("Not correcting for darkfield. 
No input.") if flat is None: flat = np.ones(images.shape[-2:], dtype=np.float32) @@ -49,8 +45,7 @@ def correct_images(images, dark=None, flat=None, gain=(1, 4, 8)): else: flat = np.asarray(flat, dtype=np.float32) - data = fastccd.correct_images(images.astype(np.uint16), - dark, flat, gain) + data = fastccd.correct_images(images.astype(np.uint16),dark, flat) t = ttime.time() - t logger.info("Corrected image stack in %.3f seconds", t) diff --git a/csxtools/settings.py b/csxtools/settings.py index 3ae520e..508fb72 100644 --- a/csxtools/settings.py +++ b/csxtools/settings.py @@ -1,3 +1,3 @@ detectors = {} -detectors['fccd'] = 'fccd_image' +detectors['axis1'] = 'axis1_image' diff_angles = ['delta', 'theta', 'gamma', None, None, None] diff --git a/csxtools/utils.py b/csxtools/utils.py index 2749568..7eadc0e 100644 --- a/csxtools/utils.py +++ b/csxtools/utils.py @@ -10,15 +10,14 @@ logger = logging.getLogger(__name__) -def get_fastccd_images(light_header, dark_headers=None, - flat=None, gain=(1, 4, 8), tag=None, roi=None): +def get_fastccd_images(light_header, dark_header=None, + flat=None, tag=None, roi=None): """Retreive and correct FastCCD Images from associated headers Retrieve FastCCD Images from databroker and correct for: - Bad Pixels (converted to ``np.nan``) - Backgorund. - - Multigain bits. - Flatfield correction. - Rotation (returned images are rotated 90 deg cw) @@ -27,19 +26,13 @@ def get_fastccd_images(light_header, dark_headers=None, light_header : databorker header This header defines the images to convert - dark_headers : tuple of 3 databroker headers , optional - These headers are the dark images. The tuple should be formed - from the dark image sets for the Gain 8, Gain 2 and Gain 1 - (most sensitive to least sensitive) settings. If a set is not - avaliable then ``None`` can be entered. + dark_headers : databroker headers , optional + The header is the dark images. flat : array_like Array to use for the flatfield correction. 
This should be a 2D array sized as the last two dimensions of the image stack. - gain : tuple - Gain multipliers for the 3 gain settings (most sensitive to - least sensitive) tag : string Data tag used to retrieve images. Used in the call to @@ -57,7 +50,7 @@ def get_fastccd_images(light_header, dark_headers=None, """ if tag is None: - tag = detectors['fccd'] + tag = detectors['axis1'] # Now lets sort out the ROI if roi is not None: @@ -67,53 +60,27 @@ def get_fastccd_images(light_header, dark_headers=None, roi[3] = roi[1] + roi[3] logger.info("Computing with ROI of %s", str(roi)) - if dark_headers is None: + if dark_header is None: bgnd = None logger.warning("Processing without dark images") else: - if dark_headers[0] is None: - raise NotImplementedError("Use of header metadata to find dark" - " images is not implemented yet.") # Read the images for the dark headers t = ttime.time() - dark = [] - for i, d in enumerate(dark_headers): - if d is not None: - # Get the images + d = dark_header + bgnd_events = _get_images(d, tag, roi) - bgnd_events = _get_images(d, tag, roi) - - # We assume that all images are for the background - # TODO : Perhaps we can loop over the generator - # If we want to do something lazy - - tt = ttime.time() - b = bgnd_events.astype(dtype=np.uint16) - logger.info("Image conversion took %.3f seconds", + tt = ttime.time() + b = bgnd_events.astype(dtype=np.uint16) + logger.info("Image conversion took %.3f seconds", ttime.time() - tt) - - b = correct_images(b, gain=(1, 1, 1)) - tt = ttime.time() - b = stackmean(b) - logger.info("Mean of image stack took %.3f seconds", + tt = ttime.time() + b = stackmean(b) + logger.info("Mean of image stack took %.3f seconds", ttime.time() - tt) - else: - if (i == 0): - logger.warning("Missing dark image" - " for gain setting 8") - elif (i == 1): - logger.warning("Missing dark image" - " for gain setting 2") - elif (i == 2): - logger.warning("Missing dark image" - " for gain setting 1") - - dark.append(b) - - bgnd 
= np.array(dark) + bgnd = np.array(b) logger.info("Computed dark images in %.3f seconds", ttime.time() - t) @@ -125,7 +92,7 @@ def get_fastccd_images(light_header, dark_headers=None, if flat is not None and roi is not None: flat = _crop(flat, roi) - return _correct_fccd_images(events, bgnd, flat, gain) + return _correct_fccd_images(events, bgnd, flat) def get_images_to_4D(images, dtype=None): @@ -183,8 +150,8 @@ def _get_images(header, tag, roi=None): return images -def _correct_fccd_images(image, bgnd, flat, gain): - image = correct_images(image, bgnd, flat, gain) +def _correct_fccd_images(image, bgnd, flat): + image = correct_images(image, bgnd, flat) image = rotate90(image, 'cw') return image From 0a3d842c582e21b66a266dea5c24d3bf03c36165 Mon Sep 17 00:00:00 2001 From: nisar Date: Fri, 21 Feb 2025 19:21:39 -0500 Subject: [PATCH 04/48] Bugs fixed and now it works --- csxtools/fastccd/images.py | 5 +- examples/Correct_FastCCD_Images.ipynb | 272 ++++++++++++++++++++++++-- setup.py | 16 +- src/fastccdmodule.c | 32 ++- 4 files changed, 291 insertions(+), 34 deletions(-) diff --git a/csxtools/fastccd/images.py b/csxtools/fastccd/images.py index eeb718d..ec4c7da 100644 --- a/csxtools/fastccd/images.py +++ b/csxtools/fastccd/images.py @@ -45,7 +45,10 @@ def correct_images(images, dark=None, flat=None): else: flat = np.asarray(flat, dtype=np.float32) - data = fastccd.correct_images(images.astype(np.uint16),dark, flat) + #print(f'Nisar type: {type(images)}') + images_numpy = images.compute() + images_uint16 = images_numpy.astype(np.uint16) + data = fastccd.correct_images(images_uint16, dark, flat) t = ttime.time() - t logger.info("Corrected image stack in %.3f seconds", t) diff --git a/examples/Correct_FastCCD_Images.ipynb b/examples/Correct_FastCCD_Images.ipynb index 6ff7669..8718771 100644 --- a/examples/Correct_FastCCD_Images.ipynb +++ b/examples/Correct_FastCCD_Images.ipynb @@ -19,22 +19,216 @@ "Load the ``databroker`` moudle, ``csxtools`` and various other 
dependencies" ] }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/nsls2/software/common/jupyter/kernel_envs/beta/.pixi/envs/default/lib/python312.zip\n", + "/nsls2/software/common/jupyter/kernel_envs/beta/.pixi/envs/default/lib/python3.12\n", + "/nsls2/software/common/jupyter/kernel_envs/beta/.pixi/envs/default/lib/python3.12/lib-dynload\n", + "\n", + "/nsls2/software/common/jupyter/kernel_envs/beta/.pixi/envs/default/lib/python3.12/site-packages\n", + "/nsls2/software/common/jupyter/kernel_envs/beta/.pixi/envs/default/lib/python3.12/site-packages/setuptools/_vendor\n" + ] + } + ], + "source": [ + "import sys\n", + "for path in sys.path:\n", + " print(path)" + ] + }, { "cell_type": "code", "execution_count": 1, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Tiled version 0.1.0b11\n", + "INFO:tiled.server.app:Tiled version 0.1.0b11\n" + ] + } + ], "source": [ "import numpy as np\n", - "from databroker import DataBroker, get_table\n", - "from csxtools.utils import get_fastccd_images, get_images_to_4D\n", + "#from databroker import DataBroker, get_table\n", + "from databroker import Broker\n", + "db = Broker.named('csx')\n", + "from csxtools.utils import get_fastccd_images, get_images_to_4D, get_fastccd_flatfield\n", "from csxtools.ipynb import image_stack_to_movie, show_image_stack\n", "%matplotlib inline\n", "from matplotlib import pyplot as plt" ] }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[0;31mSignature:\u001b[0m\n", + "\u001b[0mget_fastccd_images\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\u001b[0m\n", + "\u001b[0;34m\u001b[0m \u001b[0mlight_header\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", + 
"\u001b[0;34m\u001b[0m \u001b[0mdark_headers\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", + "\u001b[0;34m\u001b[0m \u001b[0mflat\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", + "\u001b[0;34m\u001b[0m \u001b[0mgain\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m4\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m8\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", + "\u001b[0;34m\u001b[0m \u001b[0mtag\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", + "\u001b[0;34m\u001b[0m \u001b[0mroi\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", + "\u001b[0;34m\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mDocstring:\u001b[0m\n", + "Retreive and correct FastCCD Images from associated headers\n", + "\n", + "Retrieve FastCCD Images from databroker and correct for:\n", + "\n", + "- Bad Pixels (converted to ``np.nan``)\n", + "- Backgorund.\n", + "- Multigain bits.\n", + "- Flatfield correction.\n", + "- Rotation (returned images are rotated 90 deg cw)\n", + "\n", + "Parameters\n", + "----------\n", + "light_header : databorker header\n", + " This header defines the images to convert\n", + "\n", + "dark_headers : tuple of 3 databroker headers , optional\n", + " These headers are the dark images. The tuple should be formed\n", + " from the dark image sets for the Gain 8, Gain 2 and Gain 1\n", + " (most sensitive to least sensitive) settings. If a set is not\n", + " avaliable then ``None`` can be entered.\n", + "\n", + "flat : array_like\n", + " Array to use for the flatfield correction. 
This should be a 2D\n", + " array sized as the last two dimensions of the image stack.\n", + "\n", + "gain : tuple\n", + " Gain multipliers for the 3 gain settings (most sensitive to\n", + " least sensitive)\n", + "\n", + "tag : string\n", + " Data tag used to retrieve images. Used in the call to\n", + " ``databroker.get_images()``. If `None`, use the defualt from\n", + " the settings.\n", + "\n", + "roi : tuple\n", + " coordinates of the upper-left corner and width and height of\n", + " the ROI: e.g., (x, y, w, h)\n", + "\n", + "Returns\n", + "-------\n", + "dask.array : corrected images\n", + "\u001b[0;31mFile:\u001b[0m /nsls2/software/common/jupyter/kernel_envs/beta/.pixi/envs/default/lib/python3.12/site-packages/csxtools/utils.py\n", + "\u001b[0;31mType:\u001b[0m function" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "get_fastccd_images?" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "ename": "KeyError", + "evalue": "0", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mKeyError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[5], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m scan_ff \u001b[38;5;241m=\u001b[39m db[\u001b[38;5;241m202033\u001b[39m]\n\u001b[1;32m 2\u001b[0m scan_ff_dark \u001b[38;5;241m=\u001b[39m db[\u001b[38;5;241m202032\u001b[39m]\n\u001b[0;32m----> 3\u001b[0m a\u001b[38;5;241m=\u001b[39m\u001b[43mget_fastccd_flatfield\u001b[49m\u001b[43m(\u001b[49m\u001b[43mscan_ff\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mscan_ff_dark\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mlimits\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m0.9\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m1.1\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n", + "File 
\u001b[0;32m/nsls2/software/common/jupyter/kernel_envs/beta/.pixi/envs/default/lib/python3.12/site-packages/csxtools/utils.py:290\u001b[0m, in \u001b[0;36mget_fastccd_flatfield\u001b[0;34m(light, dark, flat, limits, half_interval)\u001b[0m\n\u001b[1;32m 263\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mget_fastccd_flatfield\u001b[39m(light, dark, flat\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, limits\u001b[38;5;241m=\u001b[39m(\u001b[38;5;241m0.6\u001b[39m, \u001b[38;5;241m1.4\u001b[39m), half_interval\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m):\n\u001b[1;32m 264\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Calculate a flatfield from two headers \u001b[39;00m\n\u001b[1;32m 265\u001b[0m \n\u001b[1;32m 266\u001b[0m \u001b[38;5;124;03m This routine calculates the flatfield using the\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 288\u001b[0m \u001b[38;5;124;03m Flatfield correction. The correction is orientated as \"raw data\" not final data generated by get_fastccd_images().\u001b[39;00m\n\u001b[1;32m 289\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m--> 290\u001b[0m images \u001b[38;5;241m=\u001b[39m get_images_to_3D(\u001b[43mget_fastccd_images\u001b[49m\u001b[43m(\u001b[49m\u001b[43mlight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdark\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mflat\u001b[49m\u001b[43m)\u001b[49m)\n\u001b[1;32m 291\u001b[0m images \u001b[38;5;241m=\u001b[39m stackmean(images)\n\u001b[1;32m 292\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m half_interval:\n", + "File \u001b[0;32m/nsls2/software/common/jupyter/kernel_envs/beta/.pixi/envs/default/lib/python3.12/site-packages/csxtools/utils.py:74\u001b[0m, in \u001b[0;36mget_fastccd_images\u001b[0;34m(light_header, dark_headers, flat, gain, tag, roi)\u001b[0m\n\u001b[1;32m 72\u001b[0m 
logger\u001b[38;5;241m.\u001b[39mwarning(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mProcessing without dark images\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 73\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m---> 74\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[43mdark_headers\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m]\u001b[49m \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 75\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mNotImplementedError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUse of header metadata to find dark\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 76\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m images is not implemented yet.\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 78\u001b[0m \u001b[38;5;66;03m# Read the images for the dark headers\u001b[39;00m\n", + "File \u001b[0;32m/nsls2/software/common/jupyter/kernel_envs/beta/.pixi/envs/default/lib/python3.12/site-packages/databroker/v1.py:853\u001b[0m, in \u001b[0;36mHeader.__getitem__\u001b[0;34m(self, k)\u001b[0m\n\u001b[1;32m 851\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mgetattr\u001b[39m(\u001b[38;5;28mself\u001b[39m, k)\n\u001b[1;32m 852\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 853\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mKeyError\u001b[39;00m(k)\n", + "\u001b[0;31mKeyError\u001b[0m: 0" + ] + } + ], + "source": [ + "scan_ff = db[202033]\n", + "scan_ff_dark = db[202032]\n", + "a=get_fastccd_flatfield(scan_ff, scan_ff_dark, )" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "collapsed": true, + "jupyter": { + "outputs_hidden": true + } + }, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[0;31mSignature:\u001b[0m\n", + "\u001b[0mget_fastccd_flatfield\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\u001b[0m\n", + "\u001b[0;34m\u001b[0m 
\u001b[0mlight\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", + "\u001b[0;34m\u001b[0m \u001b[0mdark\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", + "\u001b[0;34m\u001b[0m \u001b[0mflat\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", + "\u001b[0;34m\u001b[0m \u001b[0mlimits\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0.6\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m1.4\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", + "\u001b[0;34m\u001b[0m \u001b[0mhalf_interval\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", + "\u001b[0;34m\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mDocstring:\u001b[0m\n", + "Calculate a flatfield from two headers \n", + "\n", + "This routine calculates the flatfield using the\n", + ":func:calculate_flatfield() function after obtaining the images from\n", + "the headers.\n", + "\n", + "Parameters\n", + "----------\n", + "light : databroker header\n", + " The header containing the light images\n", + "dark : databroker header(s)\n", + " The header(s) from the run containin the dark images. See get_fastccd_images for details\n", + "flat : flatfield image (optional)\n", + " The array to be used for the initial flatfield\n", + "limits : tuple limits used for returning corrected pixel flatfield\n", + " The tuple setting lower and upper bound. np.nan returned value is outside bounds\n", + "half_interval : boolean or tuple to perform calculation for only half of the FastCCD\n", + " Default is False. If True, then the hard-code portion is retained. Customize image \n", + " manipulation using a tuple of length 2 for (row_start, row_stop).\n", + "\n", + "\n", + "Returns\n", + "-------\n", + "array_like\n", + " Flatfield correction. 
The correction is orientated as \"raw data\" not final data generated by get_fastccd_images().\n", + "\u001b[0;31mFile:\u001b[0m /nsls2/software/common/jupyter/kernel_envs/beta/.pixi/envs/default/lib/python3.12/site-packages/csxtools/utils.py\n", + "\u001b[0;31mType:\u001b[0m function" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "get_fastccd_flatfield?" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -46,7 +240,10 @@ "cell_type": "code", "execution_count": 2, "metadata": { - "collapsed": true + "collapsed": true, + "jupyter": { + "outputs_hidden": true + } }, "outputs": [], "source": [ @@ -65,7 +262,10 @@ "cell_type": "code", "execution_count": 3, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [ { @@ -84,7 +284,10 @@ "cell_type": "code", "execution_count": 4, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [ { @@ -116,7 +319,10 @@ "cell_type": "code", "execution_count": 5, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [ { @@ -156,7 +362,10 @@ "cell_type": "code", "execution_count": 6, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [ { @@ -177,7 +386,10 @@ "cell_type": "code", "execution_count": 7, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [ { @@ -218,7 +430,10 @@ "cell_type": "code", "execution_count": 8, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [ { @@ -241,7 +456,10 @@ "cell_type": "code", "execution_count": 9, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [ { @@ -263,7 +481,10 @@ "cell_type": "code", "execution_count": 10, 
"metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [ { @@ -306,7 +527,10 @@ "cell_type": "code", "execution_count": 11, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [ { @@ -336,7 +560,10 @@ "cell_type": "code", "execution_count": 12, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [ { @@ -371,7 +598,10 @@ "cell_type": "code", "execution_count": 13, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [ { @@ -393,9 +623,9 @@ ], "metadata": { "kernelspec": { - "display_name": "xf23id1-srv2 - Analysis Conda Env", - "language": "", - "name": "srv2-analysis-kernel" + "display_name": "Python 3 (beta)", + "language": "python", + "name": "python3_beta" }, "language_info": { "codemirror_mode": { @@ -407,9 +637,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.5.0" + "version": "3.12.7" } }, "nbformat": 4, - "nbformat_minor": 0 + "nbformat_minor": 4 } diff --git a/setup.py b/setup.py index 95e5056..358b3c9 100644 --- a/setup.py +++ b/setup.py @@ -6,9 +6,17 @@ import numpy as np import setuptools - +from setuptools.command.build_ext import build_ext # Import build_ext import versioneer +# Custom build_ext to remove cpython-XX suffix +class CustomBuildExt(build_ext): + def get_ext_filename(self, ext_name): + # Default filename: fastccd.cpython-38-x86_64-linux-gnu.so + filename = super().get_ext_filename(ext_name) + # Strip platform-specific suffix: fastccd.so + return filename.split('.')[0] + '.so' + min_version = (3, 8) if sys.version_info < min_version: error = """ @@ -58,7 +66,11 @@ setup( name="csxtools", version=versioneer.get_version(), - cmdclass=versioneer.get_cmdclass(), + #cmdclass=versioneer.get_cmdclass(), + cmdclass={ + **versioneer.get_cmdclass(), + 
'build_ext': CustomBuildExt, # Use the custom build_ext + }, author="Brookhaven National Laboratory", description="Python library for tools to be used at the Coherent Soft X-ray scattering (CSX) beamline at NSLS-II.", packages=setuptools.find_packages(exclude=["src", "tests"]), diff --git a/src/fastccdmodule.c b/src/fastccdmodule.c index 3e29a97..9122a43 100644 --- a/src/fastccdmodule.c +++ b/src/fastccdmodule.c @@ -73,7 +73,7 @@ static PyObject* fastccd_correct_images(PyObject *self, PyObject *args){ if(!input){ goto error; } - bgnd = (PyArrayObject*)PyArray_FROMANY(_bgnd, NPY_FLOAT, 3, 3, NPY_ARRAY_IN_ARRAY); + bgnd = (PyArrayObject*)PyArray_FROMANY(_bgnd, NPY_FLOAT, 2, 2, NPY_ARRAY_IN_ARRAY); if(!bgnd){ goto error; } @@ -89,19 +89,31 @@ static PyObject* fastccd_correct_images(PyObject *self, PyObject *args){ dims_flat = PyArray_DIMS(flat); // Check array dimensions 0 and 1 are the same - if(dims_bgnd[0] != 3){ - PyErr_SetString(PyExc_ValueError, "Background array must have dimenion 0 = 3"); - goto error; - } - if((dims[ndims-2] != dims_bgnd[1]) && (dims[ndims-2] != dims_flat[0])){ - PyErr_SetString(PyExc_ValueError, "Dimensions of image array (0) do not match"); + //if(dims_bgnd[0] != 3){ + // PyErr_SetString(PyExc_ValueError, "Background array must have dimenion 0 = 3"); + // goto error; + //} +// if((dims[ndims-2] != dims_bgnd[1]) && (dims[ndims-2] != dims_flat[0])){ +// if((dims[ndims-2] != dims_bgnd[0]) && (dims[ndims-2] != dims_flat[0])){ +// PyErr_SetString(PyExc_ValueError, "Dimensions of image array (0) do not match"); +// goto error; +// } + //if((dims[ndims-1] != dims_bgnd[2]) && (dims[ndims-1] != dims_flat[1])){ +// if((dims[ndims-1] != dims_bgnd[1]) && (dims[ndims-1] != dims_flat[1])){ +// PyErr_SetString(PyExc_ValueError, "Dimensions of image array (1) do not match"); +// goto error; +// } + + // Check array dimensions for dark and flat + if(dims[ndims-2] != dims_bgnd[0] || dims[ndims-1] != dims_bgnd[1]){ + PyErr_SetString(PyExc_ValueError, 
"Dimensions of image array do not match background array dimensions."); goto error; } - if((dims[ndims-1] != dims_bgnd[2]) && (dims[ndims-1] != dims_flat[1])){ - PyErr_SetString(PyExc_ValueError, "Dimensions of image array (1) do not match"); + if(dims[ndims-2] != dims_flat[0] || dims[ndims-1] != dims_flat[1]){ + PyErr_SetString(PyExc_ValueError, "Dimensions of image array do not match flat-field array dimensions."); goto error; } - + out = (PyArrayObject*)PyArray_SimpleNew(ndims, dims, NPY_FLOAT); if(!out){ goto error; From aed1d7aad45c7f1901df7c07558d2def41a6d7f7 Mon Sep 17 00:00:00 2001 From: nisar Date: Tue, 25 Feb 2025 09:51:03 -0500 Subject: [PATCH 05/48] modifies all 'fccd' to 'axis' and 'fastccd' to 'axis1' --- csxtools/__init__.py | 3 +- csxtools/{fastccd => axis1}/__init__.py | 5 ++- csxtools/{fastccd => axis1}/images.py | 11 +++-- csxtools/{fastccd => axis1}/phocount.py | 0 csxtools/utils.py | 19 +++++---- setup.py | 16 +++++-- src/{fastccd.c => axis1.c} | 29 ++----------- src/{fastccd.h => axis1.h} | 16 ++----- src/{fastccdmodule.c => axis1module.c} | 55 ++++++++----------------- 9 files changed, 57 insertions(+), 97 deletions(-) rename csxtools/{fastccd => axis1}/__init__.py (57%) rename csxtools/{fastccd => axis1}/images.py (84%) rename csxtools/{fastccd => axis1}/phocount.py (100%) rename src/{fastccd.c => axis1.c} (73%) rename src/{fastccd.h => axis1.h} (86%) rename src/{fastccdmodule.c => axis1module.c} (77%) diff --git a/csxtools/__init__.py b/csxtools/__init__.py index b41be8c..0922b78 100644 --- a/csxtools/__init__.py +++ b/csxtools/__init__.py @@ -1,6 +1,7 @@ # Now import useful functions -from .utils import (get_fastccd_images, get_fastccd_timestamps) # noqa F401 +#from .utils import (get_fastccd_images, get_fastccd_timestamps) # noqa F401 +from .utils import (get_axis1_images, get_axis1_timestamps) # noqa F401 from .plotting import make_panel_plot # noqa F401 # set version string using versioneer diff --git a/csxtools/fastccd/__init__.py 
b/csxtools/axis1/__init__.py similarity index 57% rename from csxtools/fastccd/__init__.py rename to csxtools/axis1/__init__.py index 0eed326..2c82a6e 100644 --- a/csxtools/fastccd/__init__.py +++ b/csxtools/axis1/__init__.py @@ -1,7 +1,8 @@ -from .images import correct_images +#from .images import correct_images +from .images import correct_images_axis from .phocount import photon_count -__all__ = ['correct_images', 'photon_count'] +__all__ = ['correct_images_axis', 'photon_count'] # set version string using versioneer from .._version import get_versions diff --git a/csxtools/fastccd/images.py b/csxtools/axis1/images.py similarity index 84% rename from csxtools/fastccd/images.py rename to csxtools/axis1/images.py index ec4c7da..a799daa 100644 --- a/csxtools/fastccd/images.py +++ b/csxtools/axis1/images.py @@ -1,12 +1,13 @@ import numpy as np -from ..ext import fastccd +#from ..ext import fastccd +from ..ext import axis1 import time as ttime import logging logger = logging.getLogger(__name__) -def correct_images(images, dark=None, flat=None): +def correct_images_axis(images, dark=None, flat=None): """Subtract backgrond and gain correct images This routine subtrtacts the backgrond and corrects the images @@ -45,10 +46,8 @@ def correct_images(images, dark=None, flat=None): else: flat = np.asarray(flat, dtype=np.float32) - #print(f'Nisar type: {type(images)}') - images_numpy = images.compute() - images_uint16 = images_numpy.astype(np.uint16) - data = fastccd.correct_images(images_uint16, dark, flat) + #data = fastccd.correct_images(images.astype(np.uint16), dark, flat) + data = axis1.correct_images_axis(images.astype(np.uint16), dark, flat) t = ttime.time() - t logger.info("Corrected image stack in %.3f seconds", t) diff --git a/csxtools/fastccd/phocount.py b/csxtools/axis1/phocount.py similarity index 100% rename from csxtools/fastccd/phocount.py rename to csxtools/axis1/phocount.py diff --git a/csxtools/utils.py b/csxtools/utils.py index 7eadc0e..95069ac 100644 --- 
a/csxtools/utils.py +++ b/csxtools/utils.py @@ -1,7 +1,8 @@ import numpy as np import time as ttime -from .fastccd import correct_images +#from .fastccd import correct_images +from .axis1 import correct_images_axis from .image import rotate90, stackmean from .settings import detectors from databroker.assets.handlers import AreaDetectorHDF5TimestampHandler @@ -10,7 +11,7 @@ logger = logging.getLogger(__name__) -def get_fastccd_images(light_header, dark_header=None, +def get_axis1_images(light_header, dark_header=None, flat=None, tag=None, roi=None): """Retreive and correct FastCCD Images from associated headers @@ -92,7 +93,7 @@ def get_fastccd_images(light_header, dark_header=None, if flat is not None and roi is not None: flat = _crop(flat, roi) - return _correct_fccd_images(events, bgnd, flat) + return _correct_axis_images(events, bgnd, flat) def get_images_to_4D(images, dtype=None): @@ -150,8 +151,8 @@ def _get_images(header, tag, roi=None): return images -def _correct_fccd_images(image, bgnd, flat): - image = correct_images(image, bgnd, flat) +def _correct_axis_images(image, bgnd, flat): + image = correct_images_axis(image, bgnd, flat) image = rotate90(image, 'cw') return image @@ -167,7 +168,7 @@ def _crop(image, roi): return image.T[roi[1]:roi[3], roi[0]:roi[2]].T -def get_fastccd_timestamps(header, tag='fccd_image'): +def get_axis1_timestamps(header, tag='axis1_image'): """Return the FastCCD timestamps from the Areadetector Data File Return a list of numpy arrays of the timestamps for the images as @@ -227,7 +228,7 @@ def calculate_flatfield(image, limits=(0.6, 1.4)): -def get_fastccd_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interval=False): +def get_axis1_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interval=False): """Calculate a flatfield from two headers This routine calculates the flatfield using the @@ -254,7 +255,7 @@ def get_fastccd_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interv array_like Flatfield 
correction. The correction is orientated as "raw data" not final data generated by get_fastccd_images(). """ - images = get_images_to_3D(get_fastccd_images(light, dark, flat)) + images = get_images_to_3D(get_axis1_images(light, dark, flat)) images = stackmean(images) if half_interval: if isinstance(half_interval, bool): @@ -271,7 +272,7 @@ def get_fastccd_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interv return flat -def fccd_mask(): +def axis_mask(): """Return the initial flatfield mask for the FastCCD Returns diff --git a/setup.py b/setup.py index 358b3c9..e6d11e9 100644 --- a/setup.py +++ b/setup.py @@ -46,9 +46,16 @@ def get_ext_filename(self, ext_name): with open("requirements-extras.txt") as f: extras_require = {"complete": f.read().split()} -fastccd = Extension( - "fastccd", - sources=["src/fastccdmodule.c", "src/fastccd.c"], +#fastccd = Extension( +# "fastccd", +# sources=["src/fastccdmodule.c", "src/fastccd.c"], +# extra_compile_args=["-fopenmp"], +# extra_link_args=["-lgomp"], +#) + +axis1 = Extension( + "axis1", + sources=["src/axis1module.c", "src/axis1.c"], extra_compile_args=["-fopenmp"], extra_link_args=["-lgomp"], ) @@ -79,7 +86,8 @@ def get_ext_filename(self, ext_name): long_description_content_type='text/markdown', ext_package="csxtools.ext", include_dirs=[np.get_include()], - ext_modules=[fastccd, image, phocount], + #ext_modules=[fastccd, image, phocount], + ext_modules=[axis1, image, phocount], tests_require=["pytest"], install_requires=requirements, extras_require=extras_require, diff --git a/src/fastccd.c b/src/axis1.c similarity index 73% rename from src/fastccd.c rename to src/axis1.c index 4f11aee..e966fff 100644 --- a/src/fastccd.c +++ b/src/axis1.c @@ -40,14 +40,11 @@ #include #include -#include "fastccd.h" +#include "axis1.h" -// Correct fast ccd images by looping over all images correcting for background -// Nisar -//int correct_fccd_images(uint16_t *in, data_t *out, data_t *bg, data_t *flat, -// int ndims, index_t 
*dims, data_t* gain){ -int correct_fccd_images(uint16_t *in, data_t *out, data_t *bg, data_t *flat, +// Correct axis1 images by looping over all images correcting for background +int correct_axis_images(uint16_t *in, data_t *out, data_t *bg, data_t *flat, int ndims, index_t *dims){ index_t nimages,k; int n; @@ -64,30 +61,12 @@ int correct_fccd_images(uint16_t *in, data_t *out, data_t *bg, data_t *flat, index_t imsize = dims[ndims-1] * dims[ndims-2]; -// Nisar -//#pragma omp parallel for private(k) shared(in, out, bg, imsize, gain, flat) schedule(static,imsize) #pragma omp parallel for private(k) shared(in, out, bg, imsize, flat) schedule(static,imsize) for(k=0;k #include -#include "fastccd.h" +#include "axis1.h" -static PyObject* fastccd_correct_images(PyObject *self, PyObject *args){ +static PyObject* axis1_correct_images(PyObject *self, PyObject *args){ PyObject *_input = NULL; PyObject *_bgnd = NULL; PyObject *_flat = NULL; @@ -57,13 +57,6 @@ static PyObject* fastccd_correct_images(PyObject *self, PyObject *args){ npy_intp *dims_bgnd; npy_intp *dims_flat; int ndims; - //float gain[3]; - - - //if(!PyArg_ParseTuple(args, "OOO(fff)", &_input, &_bgnd, &_flat, - // &gain[0], &gain[1], &gain[2])){ - // return NULL; - //} if(!PyArg_ParseTuple(args, "OOO", &_input, &_bgnd, &_flat)){ return NULL; @@ -73,6 +66,7 @@ static PyObject* fastccd_correct_images(PyObject *self, PyObject *args){ if(!input){ goto error; } + bgnd = (PyArrayObject*)PyArray_FROMANY(_bgnd, NPY_FLOAT, 2, 2, NPY_ARRAY_IN_ARRAY); if(!bgnd){ goto error; @@ -88,22 +82,6 @@ static PyObject* fastccd_correct_images(PyObject *self, PyObject *args){ dims_bgnd = PyArray_DIMS(bgnd); dims_flat = PyArray_DIMS(flat); - // Check array dimensions 0 and 1 are the same - //if(dims_bgnd[0] != 3){ - // PyErr_SetString(PyExc_ValueError, "Background array must have dimenion 0 = 3"); - // goto error; - //} -// if((dims[ndims-2] != dims_bgnd[1]) && (dims[ndims-2] != dims_flat[0])){ -// if((dims[ndims-2] != dims_bgnd[0]) && 
(dims[ndims-2] != dims_flat[0])){ -// PyErr_SetString(PyExc_ValueError, "Dimensions of image array (0) do not match"); -// goto error; -// } - //if((dims[ndims-1] != dims_bgnd[2]) && (dims[ndims-1] != dims_flat[1])){ -// if((dims[ndims-1] != dims_bgnd[1]) && (dims[ndims-1] != dims_flat[1])){ -// PyErr_SetString(PyExc_ValueError, "Dimensions of image array (1) do not match"); -// goto error; -// } - // Check array dimensions for dark and flat if(dims[ndims-2] != dims_bgnd[0] || dims[ndims-1] != dims_bgnd[1]){ PyErr_SetString(PyExc_ValueError, "Dimensions of image array do not match background array dimensions."); @@ -127,10 +105,7 @@ static PyObject* fastccd_correct_images(PyObject *self, PyObject *args){ // Ok now we don't touch Python Object ... Release the GIL Py_BEGIN_ALLOW_THREADS - //correct_fccd_images(input_p, out_p, bgnd_p, flat_p, - // ndims, (index_t*)dims, (data_t*)gain); - - correct_fccd_images(input_p, out_p, bgnd_p, flat_p, + correct_axis_images(input_p, out_p, bgnd_p, flat_p, ndims, (index_t*)dims); Py_END_ALLOW_THREADS @@ -148,24 +123,30 @@ static PyObject* fastccd_correct_images(PyObject *self, PyObject *args){ return NULL; } -static PyMethodDef FastCCDMethods[] = { - { "correct_images", fastccd_correct_images, METH_VARARGS, - "Correct FastCCD Images"}, +//static PyMethodDef FastCCDMethods[] = { +// { "correct_images", fastccd_correct_images, METH_VARARGS, +static PyMethodDef AXIS1_Methods[] = { + { "correct_images_axis", axis1_correct_images, METH_VARARGS, + "Correct AXIS1 Images"}, {NULL, NULL, 0, NULL} }; -static struct PyModuleDef fastccdmodule = { +//static struct PyModuleDef fastccdmodule = { +static struct PyModuleDef axis1module = { PyModuleDef_HEAD_INIT, - "fastccd", /* name of module */ + //"fastccd", /* name of module */ + "axis1", /* name of module */ NULL, /* module documentation, may be NULL */ -1, /* size of per-interpreter state of the module, or -1 if the module keeps state in global variables. 
*/ - FastCCDMethods + AXIS1_Methods }; -PyMODINIT_FUNC PyInit_fastccd(void) { +//PyMODINIT_FUNC PyInit_fastccd(void) { +PyMODINIT_FUNC PyInit_axis1(void) { PyObject *m; - m = PyModule_Create(&fastccdmodule); + //m = PyModule_Create(&fastccdmodule); + m = PyModule_Create(&axis1module); if(m == NULL){ return NULL; } From a0386597314befc683179eef5b28b7ba372e9309 Mon Sep 17 00:00:00 2001 From: nisar Date: Tue, 25 Feb 2025 10:13:45 -0500 Subject: [PATCH 06/48] 'fccd' and 'fastccd' added --- csxtools/__init__.py | 2 +- csxtools/fastccd/__init__.py | 9 ++ csxtools/fastccd/images.py | 58 ++++++++++ csxtools/fastccd/phocount.py | 39 +++++++ csxtools/settings.py | 1 + csxtools/utils.py | 201 ++++++++++++++++++++++++++++++++++- setup.py | 14 +-- src/fastccd.c | 87 +++++++++++++++ src/fastccd.h | 53 +++++++++ src/fastccdmodule.c | 158 +++++++++++++++++++++++++++ 10 files changed, 612 insertions(+), 10 deletions(-) create mode 100644 csxtools/fastccd/__init__.py create mode 100644 csxtools/fastccd/images.py create mode 100644 csxtools/fastccd/phocount.py create mode 100644 src/fastccd.c create mode 100644 src/fastccd.h create mode 100644 src/fastccdmodule.c diff --git a/csxtools/__init__.py b/csxtools/__init__.py index 0922b78..91c925a 100644 --- a/csxtools/__init__.py +++ b/csxtools/__init__.py @@ -1,6 +1,6 @@ # Now import useful functions -#from .utils import (get_fastccd_images, get_fastccd_timestamps) # noqa F401 +from .utils import (get_fastccd_images, get_fastccd_timestamps) # noqa F401 from .utils import (get_axis1_images, get_axis1_timestamps) # noqa F401 from .plotting import make_panel_plot # noqa F401 diff --git a/csxtools/fastccd/__init__.py b/csxtools/fastccd/__init__.py new file mode 100644 index 0000000..0eed326 --- /dev/null +++ b/csxtools/fastccd/__init__.py @@ -0,0 +1,9 @@ +from .images import correct_images +from .phocount import photon_count + +__all__ = ['correct_images', 'photon_count'] + +# set version string using versioneer +from .._version import 
get_versions +__version__ = get_versions()['version'] +del get_versions diff --git a/csxtools/fastccd/images.py b/csxtools/fastccd/images.py new file mode 100644 index 0000000..59296b6 --- /dev/null +++ b/csxtools/fastccd/images.py @@ -0,0 +1,58 @@ +import numpy as np +from ..ext import fastccd +import time as ttime + +import logging +logger = logging.getLogger(__name__) + + +def correct_images(images, dark=None, flat=None, gain=(1, 4, 8)): + """Subtract backgrond and gain correct images + + This routine subtrtacts the backgrond and corrects the images + for the multigain FastCCD ADC. + + Parameters + ---------- + in : array_like + Input array of images to correct of shape (N, y, x) where N is the + number of images and x and y are the image size. + dark : array_like, optional + Input array of dark images. This should be of shape (3, y, x). + dark[0] is the gain 8 (most sensitive setting) dark image with + dark[2] being the gain 1 (least sensitive) dark image. + flat : array_like, optional + Input array for the flatfield correction. This should be of shape + (y, x) + gain : tuple, optional + These are the gain multiplication factors for the three different + gain settings + + Returns + ------- + array_like + Array of corrected images of shape (N, y, x) + + """ + + t = ttime.time() + + logger.info("Correcting image stack of shape %s", images.shape) + + if dark is None: + dark = np.zeros(images.shape[-2:], dtype=np.float32) + dark = np.array((dark, dark, dark)) + logger.info("Not correcting for darkfield. No input.") + if flat is None: + flat = np.ones(images.shape[-2:], dtype=np.float32) + logger.info("Not correcting for flatfield. 
No input.") + else: + flat = np.asarray(flat, dtype=np.float32) + + data = fastccd.correct_images(images.astype(np.uint16), + dark, flat, gain) + t = ttime.time() - t + + logger.info("Corrected image stack in %.3f seconds", t) + + return data diff --git a/csxtools/fastccd/phocount.py b/csxtools/fastccd/phocount.py new file mode 100644 index 0000000..4a911a3 --- /dev/null +++ b/csxtools/fastccd/phocount.py @@ -0,0 +1,39 @@ +from ..ext import phocount as ph + + +def photon_count(data, thresh, mean_filter, std_filter, nsum=3, nan=False): + """Do single photon counting on CCD image + + This routine does single photon counting by cluster analysis. The image + is searched for bright pixels within a threshold and then the energy + deposited by each photon is calculated. + + Parameters + ---------- + data : array_like + Stack of CCD images. This array should be of shape (N, y, x) where + N is the number of images + thresh : tuple + Threshold to use for identifying photons. This should be a tuple of + (min, max) + mean_filter : tuple + Filter only the values of the mean which are within the limits of + the tuple of the form (min, max) + std_filter : tuple + Filter only the values of the standard deviation which are within + the limits of the tuple of the form (min, max) + nsum : int + The number of pixels to use to calculate the energy deposited by the + photon. This should be 0 < nsum <= 9. + nan : bool + If true, replace empty pixels with ``np.nan`` + + Returns + ------- + tuple + Two arrays are returned. The first is an array of size (N, y, x) + where the elements are the integrated energy calculated for each + photon hit. The second array is the standard deviation for the + integrated intensity on each photon hit. 
+ """ + return ph.count(data, thresh, mean_filter, std_filter, nsum, nan) diff --git a/csxtools/settings.py b/csxtools/settings.py index 508fb72..d2e9c2a 100644 --- a/csxtools/settings.py +++ b/csxtools/settings.py @@ -1,3 +1,4 @@ detectors = {} +detectors['fccd'] = 'fccd_image' detectors['axis1'] = 'axis1_image' diff_angles = ['delta', 'theta', 'gamma', None, None, None] diff --git a/csxtools/utils.py b/csxtools/utils.py index 95069ac..285e215 100644 --- a/csxtools/utils.py +++ b/csxtools/utils.py @@ -1,7 +1,7 @@ import numpy as np import time as ttime -#from .fastccd import correct_images +from .fastccd import correct_images from .axis1 import correct_images_axis from .image import rotate90, stackmean from .settings import detectors @@ -10,6 +10,122 @@ import logging logger = logging.getLogger(__name__) +def get_fastccd_images(light_header, dark_headers=None, + flat=None, gain=(1, 4, 8), tag=None, roi=None): + """Retreive and correct FastCCD Images from associated headers + + Retrieve FastCCD Images from databroker and correct for: + + - Bad Pixels (converted to ``np.nan``) + - Backgorund. + - Multigain bits. + - Flatfield correction. + - Rotation (returned images are rotated 90 deg cw) + + Parameters + ---------- + light_header : databorker header + This header defines the images to convert + + dark_headers : tuple of 3 databroker headers , optional + These headers are the dark images. The tuple should be formed + from the dark image sets for the Gain 8, Gain 2 and Gain 1 + (most sensitive to least sensitive) settings. If a set is not + avaliable then ``None`` can be entered. + + flat : array_like + Array to use for the flatfield correction. This should be a 2D + array sized as the last two dimensions of the image stack. + + gain : tuple + Gain multipliers for the 3 gain settings (most sensitive to + least sensitive) + + tag : string + Data tag used to retrieve images. Used in the call to + ``databroker.get_images()``. 
If `None`, use the defualt from + the settings. + + roi : tuple + coordinates of the upper-left corner and width and height of + the ROI: e.g., (x, y, w, h) + + Returns + ------- + dask.array : corrected images + + """ + + if tag is None: + tag = detectors['fccd'] + + # Now lets sort out the ROI + if roi is not None: + roi = list(roi) + # Convert ROI to start:stop from start:size + roi[2] = roi[0] + roi[2] + roi[3] = roi[1] + roi[3] + logger.info("Computing with ROI of %s", str(roi)) + + if dark_headers is None: + bgnd = None + logger.warning("Processing without dark images") + else: + if dark_headers[0] is None: + raise NotImplementedError("Use of header metadata to find dark" + " images is not implemented yet.") + + # Read the images for the dark headers + t = ttime.time() + + dark = [] + for i, d in enumerate(dark_headers): + if d is not None: + # Get the images + + bgnd_events = _get_images(d, tag, roi) + + # We assume that all images are for the background + # TODO : Perhaps we can loop over the generator + # If we want to do something lazy + + tt = ttime.time() + b = bgnd_events.astype(dtype=np.uint16) + logger.info("Image conversion took %.3f seconds", + ttime.time() - tt) + + b = correct_images(b, gain=(1, 1, 1)) + tt = ttime.time() + b = stackmean(b) + logger.info("Mean of image stack took %.3f seconds", + ttime.time() - tt) + + else: + if (i == 0): + logger.warning("Missing dark image" + " for gain setting 8") + elif (i == 1): + logger.warning("Missing dark image" + " for gain setting 2") + elif (i == 2): + logger.warning("Missing dark image" + " for gain setting 1") + + dark.append(b) + + bgnd = np.array(dark) + + logger.info("Computed dark images in %.3f seconds", ttime.time() - t) + + events = _get_images(light_header, tag, roi) + + # Ok, so lets return a pims pipeline which does the image conversion + + # Crop Flatfield image + if flat is not None and roi is not None: + flat = _crop(flat, roi) + + return _correct_fccd_images(events, bgnd, flat, gain) 
def get_axis1_images(light_header, dark_header=None, flat=None, tag=None, roi=None): @@ -150,6 +266,10 @@ def _get_images(header, tag, roi=None): images = _crop_images(images, roi) return images +def _correct_fccd_images(image, bgnd, flat, gain): + image = correct_images(image, bgnd, flat, gain) + image = rotate90(image, 'cw') + return image def _correct_axis_images(image, bgnd, flat): image = correct_images_axis(image, bgnd, flat) @@ -167,6 +287,29 @@ def _crop(image, roi): roi = [image_shape[-2]-roi[3], roi[0], image_shape[-1]-roi[1], roi[2]] return image.T[roi[1]:roi[3], roi[0]:roi[2]].T +def get_fastccd_timestamps(header, tag='fccd_image'): + """Return the FastCCD timestamps from the Areadetector Data File + + Return a list of numpy arrays of the timestamps for the images as + recorded in the datafile. + + Parameters + ---------- + header : databorker header + This header defines the run + tag : string + This is the tag or name of the fastccd. + + Returns + ------- + list of arrays of the timestamps + + """ + with header.db.reg.handler_context( + {'AD_HDF5': AreaDetectorHDF5TimestampHandler}): + timestamps = list(header.data(tag)) + + return timestamps def get_axis1_timestamps(header, tag='axis1_image'): """Return the FastCCD timestamps from the Areadetector Data File @@ -192,7 +335,6 @@ def get_axis1_timestamps(header, tag='axis1_image'): return timestamps - def calculate_flatfield(image, limits=(0.6, 1.4)): """Calculate a flatfield from fluo data @@ -226,7 +368,48 @@ def calculate_flatfield(image, limits=(0.6, 1.4)): return flat +def get_fastccd_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interval=False): + """Calculate a flatfield from two headers + + This routine calculates the flatfield using the + :func:calculate_flatfield() function after obtaining the images from + the headers. 
+ + Parameters + ---------- + light : databroker header + The header containing the light images + dark : databroker header(s) + The header(s) from the run containin the dark images. See get_fastccd_images for details + flat : flatfield image (optional) + The array to be used for the initial flatfield + limits : tuple limits used for returning corrected pixel flatfield + The tuple setting lower and upper bound. np.nan returned value is outside bounds + half_interval : boolean or tuple to perform calculation for only half of the FastCCD + Default is False. If True, then the hard-code portion is retained. Customize image + manipulation using a tuple of length 2 for (row_start, row_stop). + + Returns + ------- + array_like + Flatfield correction. The correction is orientated as "raw data" not final data generated by get_fastccd_images(). + """ + images = get_images_to_3D(get_fastccd_images(light, dark, flat)) + images = stackmean(images) + if half_interval: + if isinstance(half_interval, bool): + row_start, row_stop = (7, 486) #hard coded for the broken half of the fccd + else: + row_start, row_stop = half_interval + print(row_start, row_stop) + images[:, row_start:row_stop] = np.nan + flat = calculate_flatfield(images, limits) + removed = np.sum(np.isnan(flat)) + if removed != 0: + logger.warning("Flatfield correction removed %d pixels (%.2f %%)" % + (removed, removed * 100 / flat.size)) + return flat def get_axis1_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interval=False): """Calculate a flatfield from two headers @@ -271,6 +454,20 @@ def get_axis1_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interval (removed, removed * 100 / flat.size)) return flat +def fccd_mask(): + """Return the initial flatfield mask for the FastCCD + + Returns + ------- + np.array of flatfield + + """ + flat = np.ones((960, 960)) + flat[120:250, 0:480] = np.nan + flat[:, 476:484] = np.nan + flat = np.rot90(flat) + + return flat def axis_mask(): """Return the 
initial flatfield mask for the FastCCD diff --git a/setup.py b/setup.py index e6d11e9..e9ba792 100644 --- a/setup.py +++ b/setup.py @@ -46,12 +46,12 @@ def get_ext_filename(self, ext_name): with open("requirements-extras.txt") as f: extras_require = {"complete": f.read().split()} -#fastccd = Extension( -# "fastccd", -# sources=["src/fastccdmodule.c", "src/fastccd.c"], -# extra_compile_args=["-fopenmp"], -# extra_link_args=["-lgomp"], -#) +fastccd = Extension( + "fastccd", + sources=["src/fastccdmodule.c", "src/fastccd.c"], + extra_compile_args=["-fopenmp"], + extra_link_args=["-lgomp"], +) axis1 = Extension( "axis1", @@ -87,7 +87,7 @@ def get_ext_filename(self, ext_name): ext_package="csxtools.ext", include_dirs=[np.get_include()], #ext_modules=[fastccd, image, phocount], - ext_modules=[axis1, image, phocount], + ext_modules=[fastccd, axis1, image, phocount], tests_require=["pytest"], install_requires=requirements, extras_require=extras_require, diff --git a/src/fastccd.c b/src/fastccd.c new file mode 100644 index 0000000..0099b9f --- /dev/null +++ b/src/fastccd.c @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2014, Brookhaven Science Associates, Brookhaven + * National Laboratory. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright + * notice this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * * Neither the name of the Brookhaven Science Associates, Brookhaven + * National Laboratory nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include +#include +#include +#include +#include + +#include "fastccd.h" + + +// Correct fast ccd images by looping over all images correcting for background +int correct_fccd_images(uint16_t *in, data_t *out, data_t *bg, data_t *flat, + int ndims, index_t *dims, data_t* gain){ + index_t nimages,k; + int n; + + if(ndims == 2) + { + nimages = 1; + } else { + nimages = dims[0]; + for(n=1;n<(ndims-2);n++){ + nimages = nimages * dims[n]; + } + } + + index_t imsize = dims[ndims-1] * dims[ndims-2]; + +#pragma omp parallel for private(k) shared(in, out, bg, imsize, gain, flat) schedule(static,imsize) + for(k=0;k +#include + +/* Include python and numpy header files */ +#include +#define NPY_NO_DEPRECATED_API NPY_1_9_API_VERSION +#include +#include + +#include "fastccd.h" + +static PyObject* fastccd_correct_images(PyObject *self, PyObject *args){ + PyObject *_input = NULL; + PyObject *_bgnd = NULL; + PyObject *_flat = NULL; + PyArrayObject *input = NULL; + PyArrayObject *bgnd = NULL; + PyArrayObject *flat = NULL; + PyArrayObject *out = NULL; + npy_intp *dims; + npy_intp *dims_bgnd; + npy_intp *dims_flat; + int ndims; + float gain[3]; + + + if(!PyArg_ParseTuple(args, 
"OOO(fff)", &_input, &_bgnd, &_flat, + &gain[0], &gain[1], &gain[2])){ + return NULL; + } + + input = (PyArrayObject*)PyArray_FROMANY(_input, NPY_UINT16, 2, 0,NPY_ARRAY_IN_ARRAY); + if(!input){ + goto error; + } + bgnd = (PyArrayObject*)PyArray_FROMANY(_bgnd, NPY_FLOAT, 3, 3, NPY_ARRAY_IN_ARRAY); + if(!bgnd){ + goto error; + } + + flat = (PyArrayObject*)PyArray_FROMANY(_flat, NPY_FLOAT, 2,2, NPY_ARRAY_IN_ARRAY); + if(!flat){ + goto error; + } + + ndims = PyArray_NDIM(input); + dims = PyArray_DIMS(input); + dims_bgnd = PyArray_DIMS(bgnd); + dims_flat = PyArray_DIMS(flat); + + // Check array dimensions 0 and 1 are the same + if(dims_bgnd[0] != 3){ + PyErr_SetString(PyExc_ValueError, "Background array must have dimenion 0 = 3"); + goto error; + } + if((dims[ndims-2] != dims_bgnd[1]) && (dims[ndims-2] != dims_flat[0])){ + PyErr_SetString(PyExc_ValueError, "Dimensions of image array (0) do not match"); + goto error; + } + if((dims[ndims-1] != dims_bgnd[2]) && (dims[ndims-1] != dims_flat[1])){ + PyErr_SetString(PyExc_ValueError, "Dimensions of image array (1) do not match"); + goto error; + } + + out = (PyArrayObject*)PyArray_SimpleNew(ndims, dims, NPY_FLOAT); + if(!out){ + goto error; + } + + uint16_t* input_p = (uint16_t*)PyArray_DATA(input); + data_t *out_p = (data_t*)PyArray_DATA(out); + data_t *bgnd_p = (data_t*)PyArray_DATA(bgnd); + data_t *flat_p = (data_t*)PyArray_DATA(flat); + + // Ok now we don't touch Python Object ... 
Release the GIL + Py_BEGIN_ALLOW_THREADS + + correct_fccd_images(input_p, out_p, bgnd_p, flat_p, + ndims, (index_t*)dims, (data_t*)gain); + + Py_END_ALLOW_THREADS + + Py_XDECREF(input); + Py_XDECREF(bgnd); + Py_XDECREF(flat); + return Py_BuildValue("N", out); + +error: + Py_XDECREF(input); + Py_XDECREF(bgnd); + Py_XDECREF(out); + Py_XDECREF(flat); + return NULL; +} + +static PyMethodDef FastCCDMethods[] = { + { "correct_images", fastccd_correct_images, METH_VARARGS, + "Correct FastCCD Images"}, + {NULL, NULL, 0, NULL} +}; + +static struct PyModuleDef fastccdmodule = { + PyModuleDef_HEAD_INIT, + "fastccd", /* name of module */ + NULL, /* module documentation, may be NULL */ + -1, /* size of per-interpreter state of the module, + or -1 if the module keeps state in global variables. */ + FastCCDMethods +}; + +PyMODINIT_FUNC PyInit_fastccd(void) { + PyObject *m; + m = PyModule_Create(&fastccdmodule); + if(m == NULL){ + return NULL; + } + + import_array(); + import_umath(); + + return m; +} From d03cf7153cb2afd5241fe3ff664f5629df17feeb Mon Sep 17 00:00:00 2001 From: nisarnk <78008867+nisarnk@users.noreply.github.com> Date: Tue, 25 Feb 2025 12:22:03 -0500 Subject: [PATCH 07/48] Update utils.py --- csxtools/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/csxtools/utils.py b/csxtools/utils.py index 285e215..fbe14dd 100644 --- a/csxtools/utils.py +++ b/csxtools/utils.py @@ -423,7 +423,7 @@ def get_axis1_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interval light : databroker header The header containing the light images dark : databroker header(s) - The header(s) from the run containin the dark images. See get_fastccd_images for details + The header(s) from the run containin the dark images. 
flat : flatfield image (optional) The array to be used for the initial flatfield limits : tuple limits used for returning corrected pixel flatfield From 12ac1431a70a0f6ef1c59ba2befb2b893ef0745d Mon Sep 17 00:00:00 2001 From: nisar Date: Tue, 25 Feb 2025 13:18:16 -0500 Subject: [PATCH 08/48] csxtools/__init__.py modified --- csxtools/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/csxtools/__init__.py b/csxtools/__init__.py index 91c925a..b61ca49 100644 --- a/csxtools/__init__.py +++ b/csxtools/__init__.py @@ -1,7 +1,7 @@ # Now import useful functions -from .utils import (get_fastccd_images, get_fastccd_timestamps) # noqa F401 -from .utils import (get_axis1_images, get_axis1_timestamps) # noqa F401 +from .utils import (get_fastccd_images, get_fastccd_flatfield, get_fastccd_timestamps) # noqa F401 +from .utils import (get_axis1_images, get_axis1_flatfield, get_axis1_timestamps) # noqa F401 from .plotting import make_panel_plot # noqa F401 # set version string using versioneer From 49fef4b97137fcba7c3081ceea16d5d463fd5b59 Mon Sep 17 00:00:00 2001 From: nisar Date: Tue, 4 Mar 2025 17:20:27 -0500 Subject: [PATCH 09/48] Mirror reflection operation is done --- csxtools/utils.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/csxtools/utils.py b/csxtools/utils.py index fbe14dd..0497095 100644 --- a/csxtools/utils.py +++ b/csxtools/utils.py @@ -129,6 +129,11 @@ def get_fastccd_images(light_header, dark_headers=None, def get_axis1_images(light_header, dark_header=None, flat=None, tag=None, roi=None): + flipped_image = _get_axis1_images(light_header, dark_header=None,flat=None, tag=None, roi=None) + return flipped_image[...,::-1] + +def _get_axis1_images(light_header, dark_header=None + flat=None, tag=None, roi=None): """Retreive and correct FastCCD Images from associated headers Retrieve FastCCD Images from databroker and correct for: @@ -438,7 +443,7 @@ def get_axis1_flatfield(light, dark, flat=None, limits=(0.6, 
1.4), half_interval array_like Flatfield correction. The correction is orientated as "raw data" not final data generated by get_fastccd_images(). """ - images = get_images_to_3D(get_axis1_images(light, dark, flat)) + images = get_images_to_3D(_get_axis1_images(light, dark, flat)) images = stackmean(images) if half_interval: if isinstance(half_interval, bool): From 98e597fefd602dac378f1b02e0d8eae83d1eac2f Mon Sep 17 00:00:00 2001 From: nisar Date: Tue, 4 Mar 2025 18:47:29 -0500 Subject: [PATCH 10/48] Bug fix --- csxtools/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/csxtools/utils.py b/csxtools/utils.py index 0497095..513a77f 100644 --- a/csxtools/utils.py +++ b/csxtools/utils.py @@ -129,10 +129,10 @@ def get_fastccd_images(light_header, dark_headers=None, def get_axis1_images(light_header, dark_header=None, flat=None, tag=None, roi=None): - flipped_image = _get_axis1_images(light_header, dark_header=None,flat=None, tag=None, roi=None) + flipped_image = _get_axis1_images(light_header, dark_header, flat, tag, roi) return flipped_image[...,::-1] -def _get_axis1_images(light_header, dark_header=None +def _get_axis1_images(light_header, dark_header=None, flat=None, tag=None, roi=None): """Retreive and correct FastCCD Images from associated headers From da37d357e9e466e2886b67823ba0208aee0d4104 Mon Sep 17 00:00:00 2001 From: nisar Date: Wed, 19 Mar 2025 11:20:47 -0400 Subject: [PATCH 11/48] Formatiing and docstring correction is done --- csxtools/__init__.py | 15 +- csxtools/_version.py | 121 ++++++---- csxtools/axis1/__init__.py | 7 +- csxtools/axis1/images.py | 8 +- csxtools/axis1/phocount.py | 39 --- csxtools/fastccd/__init__.py | 5 +- csxtools/fastccd/images.py | 6 +- csxtools/helpers/__init__.py | 25 +- csxtools/helpers/fastccd.py | 446 ++++++++++++++++++++++------------- csxtools/helpers/overscan.py | 201 +++++++++------- csxtools/image/__init__.py | 26 +- csxtools/image/stack.py | 1 + csxtools/image/transform.py | 6 +- 
csxtools/image_corr.py | 50 ++-- csxtools/ipynb/__init__.py | 6 +- csxtools/ipynb/animation.py | 76 +++--- csxtools/ipynb/nbviewer.py | 4 +- csxtools/plotting.py | 12 +- csxtools/settings.py | 6 +- csxtools/utils.py | 127 +++++----- doc/conf.py | 189 ++++++++------- run_tests.py | 1 + setup.py | 17 +- src/axis1module.c | 8 +- tests/test_fastccd.py | 39 +-- tests/test_image.py | 55 +++-- versioneer.py | 216 ++++++++++------- 27 files changed, 1000 insertions(+), 712 deletions(-) delete mode 100644 csxtools/axis1/phocount.py diff --git a/csxtools/__init__.py b/csxtools/__init__.py index b61ca49..523babe 100644 --- a/csxtools/__init__.py +++ b/csxtools/__init__.py @@ -1,10 +1,19 @@ # Now import useful functions -from .utils import (get_fastccd_images, get_fastccd_flatfield, get_fastccd_timestamps) # noqa F401 -from .utils import (get_axis1_images, get_axis1_flatfield, get_axis1_timestamps) # noqa F401 +from .utils import ( + get_fastccd_images, + get_fastccd_flatfield, + get_fastccd_timestamps, +) # noqa F401 +from .utils import ( + get_axis1_images, + get_axis1_flatfield, + get_axis1_timestamps, +) # noqa F401 from .plotting import make_panel_plot # noqa F401 # set version string using versioneer from ._version import get_versions -__version__ = get_versions()['version'] + +__version__ = get_versions()["version"] del get_versions diff --git a/csxtools/_version.py b/csxtools/_version.py index a33ea8e..2e6a0b3 100644 --- a/csxtools/_version.py +++ b/csxtools/_version.py @@ -1,4 +1,3 @@ - # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). 
Distribution tarballs (built by setup.py sdist) and build @@ -57,6 +56,7 @@ def decorate(f): HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f + return decorate @@ -67,9 +67,12 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) + p = subprocess.Popen( + [c] + args, + cwd=cwd, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr else None), + ) break except EnvironmentError: e = sys.exc_info()[1] @@ -99,12 +102,17 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): dirname = os.path.basename(root) if not dirname.startswith(parentdir_prefix): if verbose: - print("guessing rootdir is '%s', but '%s' doesn't start with " - "prefix '%s'" % (root, dirname, parentdir_prefix)) + print( + "guessing rootdir is '%s', but '%s' doesn't start with " + "prefix '%s'" % (root, dirname, parentdir_prefix) + ) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None} + return { + "version": dirname[len(parentdir_prefix) :], + "full-revisionid": None, + "dirty": False, + "error": None, + } @register_vcs_handler("git", "get_keywords") @@ -144,7 +152,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. 
The old git %d @@ -153,27 +161,32 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) + tags = set([r for r in refs if re.search(r"\d", r)]) if verbose: - print("discarding '%s', no digits" % ",".join(refs-tags)) + print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] + r = ref[len(tag_prefix) :] if verbose: print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None - } + return { + "version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": None, + } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags"} + return { + "version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": "no suitable tags", + } @register_vcs_handler("git", "pieces_from_vcs") @@ -193,9 +206,9 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): GITS = ["git.cmd", "git.exe"] # if there is a tag, this yields TAG-NUM-gHEX[-dirty] # if there are no tags, this yields HEX[-dirty] (no NUM) - describe_out = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long"], - cwd=root) + describe_out = run_command( + GITS, ["describe", "--tags", "--dirty", "--always", "--long"], cwd=root + ) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git 
describe' failed") @@ -218,17 +231,16 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] + git_describe = git_describe[: git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? - pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) + pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out return pieces # tag @@ -237,10 +249,12 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) + pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( + full_tag, + tag_prefix, + ) return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] + pieces["closest-tag"] = full_tag[len(tag_prefix) :] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) @@ -251,8 +265,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): else: # HEX: no tags pieces["closest-tag"] = None - count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) + count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits return pieces @@ -281,8 +294,7 @@ def render_pep440(pieces): rendered += ".dirty" else: # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) + rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return 
rendered @@ -389,10 +401,12 @@ def render_git_describe_long(pieces): def render(pieces, style): if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"]} + return { + "version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + } if not style or style == "default": style = "pep440" # the default @@ -412,8 +426,12 @@ def render(pieces, style): else: raise ValueError("unknown style '%s'" % style) - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None} + return { + "version": rendered, + "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], + "error": None, + } def get_versions(): @@ -426,8 +444,7 @@ def get_versions(): verbose = cfg.verbose try: - return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, - verbose) + return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass @@ -436,12 +453,15 @@ def get_versions(): # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. 
- for i in cfg.versionfile_source.split('/'): + for i in cfg.versionfile_source.split("/"): root = os.path.dirname(root) except NameError: - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to find root of source tree"} + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to find root of source tree", + } try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) @@ -455,6 +475,9 @@ def get_versions(): except NotThisMethod: pass - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to compute version"} + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", + } diff --git a/csxtools/axis1/__init__.py b/csxtools/axis1/__init__.py index 2c82a6e..67b9e18 100644 --- a/csxtools/axis1/__init__.py +++ b/csxtools/axis1/__init__.py @@ -1,10 +1,11 @@ -#from .images import correct_images +# from .images import correct_images from .images import correct_images_axis from .phocount import photon_count -__all__ = ['correct_images_axis', 'photon_count'] +__all__ = ["correct_images_axis", "photon_count"] # set version string using versioneer from .._version import get_versions -__version__ = get_versions()['version'] + +__version__ = get_versions()["version"] del get_versions diff --git a/csxtools/axis1/images.py b/csxtools/axis1/images.py index a799daa..2a3a6f9 100644 --- a/csxtools/axis1/images.py +++ b/csxtools/axis1/images.py @@ -1,9 +1,9 @@ import numpy as np -#from ..ext import fastccd from ..ext import axis1 import time as ttime import logging + logger = logging.getLogger(__name__) @@ -11,11 +11,11 @@ def correct_images_axis(images, dark=None, flat=None): """Subtract backgrond and gain correct images This routine subtrtacts the backgrond and corrects the images - for the multigain FastCCD ADC. + for AXIS1. 
Parameters ---------- - in : array_like + images : array_like Input array of images to correct of shape (N, y, x) where N is the number of images and x and y are the image size. dark : array_like, optional @@ -46,7 +46,7 @@ def correct_images_axis(images, dark=None, flat=None): else: flat = np.asarray(flat, dtype=np.float32) - #data = fastccd.correct_images(images.astype(np.uint16), dark, flat) + # data = fastccd.correct_images(images.astype(np.uint16), dark, flat) data = axis1.correct_images_axis(images.astype(np.uint16), dark, flat) t = ttime.time() - t diff --git a/csxtools/axis1/phocount.py b/csxtools/axis1/phocount.py deleted file mode 100644 index 4a911a3..0000000 --- a/csxtools/axis1/phocount.py +++ /dev/null @@ -1,39 +0,0 @@ -from ..ext import phocount as ph - - -def photon_count(data, thresh, mean_filter, std_filter, nsum=3, nan=False): - """Do single photon counting on CCD image - - This routine does single photon counting by cluster analysis. The image - is searched for bright pixels within a threshold and then the energy - deposited by each photon is calculated. - - Parameters - ---------- - data : array_like - Stack of CCD images. This array should be of shape (N, y, x) where - N is the number of images - thresh : tuple - Threshold to use for identifying photons. This should be a tuple of - (min, max) - mean_filter : tuple - Filter only the values of the mean which are within the limits of - the tuple of the form (min, max) - std_filter : tuple - Filter only the values of the standard deviation which are within - the limits of the tuple of the form (min, max) - nsum : int - The number of pixels to use to calculate the energy deposited by the - photon. This should be 0 < nsum <= 9. - nan : bool - If true, replace empty pixels with ``np.nan`` - - Returns - ------- - tuple - Two arrays are returned. The first is an array of size (N, y, x) - where the elements are the integrated energy calculated for each - photon hit. 
The second array is the standard deviation for the - integrated intensity on each photon hit. - """ - return ph.count(data, thresh, mean_filter, std_filter, nsum, nan) diff --git a/csxtools/fastccd/__init__.py b/csxtools/fastccd/__init__.py index 0eed326..94d28ce 100644 --- a/csxtools/fastccd/__init__.py +++ b/csxtools/fastccd/__init__.py @@ -1,9 +1,10 @@ from .images import correct_images from .phocount import photon_count -__all__ = ['correct_images', 'photon_count'] +__all__ = ["correct_images", "photon_count"] # set version string using versioneer from .._version import get_versions -__version__ = get_versions()['version'] + +__version__ = get_versions()["version"] del get_versions diff --git a/csxtools/fastccd/images.py b/csxtools/fastccd/images.py index 59296b6..7275bf7 100644 --- a/csxtools/fastccd/images.py +++ b/csxtools/fastccd/images.py @@ -3,6 +3,7 @@ import time as ttime import logging + logger = logging.getLogger(__name__) @@ -14,7 +15,7 @@ def correct_images(images, dark=None, flat=None, gain=(1, 4, 8)): Parameters ---------- - in : array_like + images : array_like Input array of images to correct of shape (N, y, x) where N is the number of images and x and y are the image size. 
dark : array_like, optional @@ -49,8 +50,7 @@ def correct_images(images, dark=None, flat=None, gain=(1, 4, 8)): else: flat = np.asarray(flat, dtype=np.float32) - data = fastccd.correct_images(images.astype(np.uint16), - dark, flat, gain) + data = fastccd.correct_images(images.astype(np.uint16), dark, flat, gain) t = ttime.time() - t logger.info("Corrected image stack in %.3f seconds", t) diff --git a/csxtools/helpers/__init__.py b/csxtools/helpers/__init__.py index f669ba0..ea162ca 100644 --- a/csxtools/helpers/__init__.py +++ b/csxtools/helpers/__init__.py @@ -1,9 +1,26 @@ -from .fastccd import (get_dark_near, get_dark_near_all, get_fastccd_roi, get_fastccd_exp, get_fastccd_images_sized, convert_photons) -from .overscan import (get_os_correction_images, get_os_dropped_images) +from .fastccd import ( + get_dark_near, + get_dark_near_all, + get_fastccd_roi, + get_fastccd_exp, + get_fastccd_images_sized, + convert_photons, +) +from .overscan import get_os_correction_images, get_os_dropped_images -__all__ = ['get_dark_near', 'get_dark_near_all', 'get_fastccd_roi', 'get_fastccd_exp', 'get_fastccd_images_sized', 'convert_photons', 'get_os_correction_images', 'get_os_dropped_images'] +__all__ = [ + "get_dark_near", + "get_dark_near_all", + "get_fastccd_roi", + "get_fastccd_exp", + "get_fastccd_images_sized", + "convert_photons", + "get_os_correction_images", + "get_os_dropped_images", +] # set version string using versioneer from .._version import get_versions -__version__ = get_versions()['version'] + +__version__ = get_versions()["version"] del get_versions diff --git a/csxtools/helpers/fastccd.py b/csxtools/helpers/fastccd.py index 3ca8d54..6944583 100644 --- a/csxtools/helpers/fastccd.py +++ b/csxtools/helpers/fastccd.py @@ -3,91 +3,150 @@ import numpy as np from csxtools.image import rotate90, stackmean -from csxtools.utils import calculate_flatfield, get_images_to_3D, get_fastccd_images, get_images_to_4D +from csxtools.utils import ( + calculate_flatfield, + 
get_images_to_3D, + get_fastccd_images, + get_images_to_4D, +) from csxtools.helpers.overscan import get_os_correction_images, get_os_dropped_images import logging + logger = logging.getLogger(__name__) -from ipywidgets import interact #TODO move this and general untility to different module later (like movie making) +from ipywidgets import ( + interact, +) # TODO move this and general untility to different module later (like movie making) + -def browse_3Darray(res,title='Frame'):#, extra_scalar_dict=None): - """ Widget for notebooks. Sliding bar to browse 3D python array. Must plot using subplots method with 1 axes. +def browse_3Darray(res, title="Frame"): # , extra_scalar_dict=None): + """Widget for notebooks. Sliding bar to browse 3D python array. Must plot using subplots method with 1 axes. res : 3D array with the first element being interated - + title : string to be the title of the plot - """ + """ N = len(res) + def view_image(i=0): im.set_data(res[i]) - #if extra_scalar_dict is not None: + # if extra_scalar_dict is not None: # key = extra_scalr_dict.keys()[0] # values = extra_scalar_dict.values() - - #if extra_scalar_dict is None: + + # if extra_scalar_dict is None: # ax.set_title(f'{title} {i} {key} {values[i]}') - #else: - ax.set_title(f'{title} {i}') + # else: + ax.set_title(f"{title} {i}") fig.canvas.draw_idle() - interact(view_image, i=(0, N-1)) - - -#### FCCD specific stuff starts here -def find_possible_darks(header, dark_gain, search_time, return_debug_info,exposure_time_tolerance = 0.002, db=None): - darks_possible ={'scan':[],'exp_time':[], 'delta_time':[] } + interact(view_image, i=(0, N - 1)) + + +#### FCCD specific stuff starts here + + +def find_possible_darks( + header, + dark_gain, + search_time, + return_debug_info, + exposure_time_tolerance=0.002, + db=None, +): + darks_possible = {"scan": [], "exp_time": [], "delta_time": []} start_time = header.start["time"] stop_time = header.stop["time"] - if header.stop["exit_status"] != 'abort': 
#because the key is missing from descriptors, was never recorded - #try: - exp_time = header.descriptors[0]['configuration']['fccd']['data']['fccd_cam_acquire_time'] - #except: - #print(header.start["scan_id"]) - #raise - - - hhs = db(since = start_time - search_time, until = start_time, **{'fccd.image': 'dark'}, **{'fccd.gain': dark_gain}) - data = [[h.start["scan_id"], h.descriptors[0]['configuration']['fccd']['data']['fccd_cam_acquire_time'], - start_time-h.start['time']] for h in hhs if getattr(h, 'stop', {}).get('exit_status', 'not done') == 'success'] - - hhs = db(since = stop_time, until = stop_time + search_time, **{'fccd.image': 'dark'}, **{'fccd.gain': dark_gain}) - data.extend( [[h.start["scan_id"], h.descriptors[0]['configuration']['fccd']['data']['fccd_cam_acquire_time'], - h.stop['time']-stop_time] for h in hhs if getattr(h, 'stop', {}).get('exit_status', 'not done') == 'success']) - data=np.array(data) - #print(data) - for i,k in enumerate(darks_possible.keys()): + if ( + header.stop["exit_status"] != "abort" + ): # because the key is missing from descriptors, was never recorded + # try: + exp_time = header.descriptors[0]["configuration"]["fccd"]["data"][ + "fccd_cam_acquire_time" + ] + # except: + # print(header.start["scan_id"]) + # raise + + hhs = db( + since=start_time - search_time, + until=start_time, + **{"fccd.image": "dark"}, + **{"fccd.gain": dark_gain}, + ) + data = [ + [ + h.start["scan_id"], + h.descriptors[0]["configuration"]["fccd"]["data"]["fccd_cam_acquire_time"], + start_time - h.start["time"], + ] + for h in hhs + if getattr(h, "stop", {}).get("exit_status", "not done") == "success" + ] + + hhs = db( + since=stop_time, + until=stop_time + search_time, + **{"fccd.image": "dark"}, + **{"fccd.gain": dark_gain}, + ) + data.extend( + [ + [ + h.start["scan_id"], + h.descriptors[0]["configuration"]["fccd"]["data"][ + "fccd_cam_acquire_time" + ], + h.stop["time"] - stop_time, + ] + for h in hhs + if getattr(h, "stop", 
{}).get("exit_status", "not done") == "success" + ] + ) + data = np.array(data) + # print(data) + for i, k in enumerate(darks_possible.keys()): try: - darks_possible[k] = data[:,i] + darks_possible[k] = data[:, i] except IndexError: darks_possible[k] = None return darks_possible - + darks_possible = pandas.DataFrame(darks_possible) - #clean up if exposure times are not within exp_time_tolerance seconds - darks_possible = darks_possible[darks_possible['exp_time'].apply(np.isclose, b=exp_time, atol=exposure_time_tolerance) == True] + # clean up if exposure times are not within exp_time_tolerance seconds + darks_possible = darks_possible[ + darks_possible["exp_time"].apply( + np.isclose, b=exp_time, atol=exposure_time_tolerance + ) + == True + ] - return darks_possible -def get_dark_near(header, dark_gain = 'auto', search_time=30*60, return_debug_info = False, db=None): - """ Find and extract the most relevant dark image (relevant in time and gain setting) for a given scan. + +def get_dark_near( + header, dark_gain="auto", search_time=30 * 60, return_debug_info=False, db=None +): + """Find and extract the most relevant dark image (relevant in time and gain setting) for a given scan. header : databroker header of blueksy scan - dark_gain : string + dark_gain : string match dark gain settings as described in the start document ('auto', 'x2', 'x1') - search_time : int or float + search_time : int or float time in seconds before (after) the start (stop) document timestamps - + db : Broker.name("csx") is expected. 
Use databroker v1 or v2 or a wrapped tiled catalog """ - - darks_possible = find_possible_darks(header, dark_gain, search_time, return_debug_info, db=db) - #print( darks_possible ) + + darks_possible = find_possible_darks( + header, dark_gain, search_time, return_debug_info, db=db + ) + # print( darks_possible ) try: - dark = int(darks_possible.sort_values(by='delta_time').reset_index()['scan'][0]) + dark = int(darks_possible.sort_values(by="delta_time").reset_index()["scan"][0]) except: dark = None return None @@ -97,9 +156,13 @@ def get_dark_near(header, dark_gain = 'auto', search_time=30*60, return_debug_in else: return db[dark] + def get_dark_near_all(header, db=None, **kwargs): - d8,d2,d1 = (get_dark_near(header,dark_gain= dg, db=db, **kwargs) for dg in ['auto','x2','x1']) - return d8,d2,d1 + d8, d2, d1 = ( + get_dark_near(header, dark_gain=dg, db=db, **kwargs) + for dg in ["auto", "x2", "x1"] + ) + return d8, d2, d1 def get_fastccd_roi(header, roi_number): @@ -114,92 +177,108 @@ def get_fastccd_roi(header, roi_number): ------- named tuple start_x : int, horizontal starting pixel from left (using output of get_fastccd_images()) - size_x : int, horizontal bin size for ROI + size_x : int, horizontal bin size for ROI start_y : int, vertical starting pixel from top (using output of get_fastccd_images()) size_y : int, vertical bin size for ROI name : string, name assigned by user in ROI (optional) - + """ - config = header.descriptors[0]['configuration']['fccd']['data'] - if config == {}: #prior to mid 2017 + config = header.descriptors[0]["configuration"]["fccd"]["data"] + if config == {}: # prior to mid 2017 x_start, x_size, y_start, y_size = None - logger.warning('Meta data does not exist.') - #elif config[f'fccd_stats{roi_number}_compute_statistics'] == 'Yes': + logger.warning("Meta data does not exist.") + # elif config[f'fccd_stats{roi_number}_compute_statistics'] == 'Yes': else: - x_start = config[f'fccd_roi{roi_number}_min_xyz_min_x'] - x_size = 
config[f'fccd_roi{roi_number}_size_x'] - y_start = config[f'fccd_roi{roi_number}_min_xyz_min_y'] - y_size = config[f'fccd_roi{roi_number}_size_y'] - name = config[f'fccd_roi{roi_number}_name_'] - - - FCCDroi = namedtuple('FCCDroi', ['start_x', 'size_x', 'start_y', 'size_y', 'name']) + x_start = config[f"fccd_roi{roi_number}_min_xyz_min_x"] + x_size = config[f"fccd_roi{roi_number}_size_x"] + y_start = config[f"fccd_roi{roi_number}_min_xyz_min_y"] + y_size = config[f"fccd_roi{roi_number}_size_y"] + name = config[f"fccd_roi{roi_number}_name_"] + + FCCDroi = namedtuple("FCCDroi", ["start_x", "size_x", "start_y", "size_y", "name"]) return FCCDroi(x_start, x_size, y_start, y_size, name) + def get_fastccd_exp(header): - """Returns named tuple of exposure time, exposure period and number of images per "point" for a databroker header. + """Returns named tuple of exposure time, exposure period and number of images per "point" for a databroker header. Parameters ---------- header : databroker header - + Returns ------- named tuple exp_time : float, exposure time (photon integration) of each image in seconds - exp_period : float, exposure period time in seconds. the time between consecutive frames for a single "point". + exp_period : float, exposure period time in seconds. the time between consecutive frames for a single "point". Most often used to convert XPCS lag_step (or delays) to "time" from "frames" num_images : int, number of images per "point". - + """ - config = header.descriptors[0]['configuration']['fccd']['data'] - if config == {}: #prior to mid 2017 + config = header.descriptors[0]["configuration"]["fccd"]["data"] + if config == {}: # prior to mid 2017 ## this is done because of deprecated gs.DETS and replaced by descriptors. i don't know if db v2 and tiled even handle this okay. 
## when we delete data from 2017 we can just delete this part of the code - exp_t = header.table().get('fccd_acquire_time')[1] - exp_p = header.table().get('fccd_acquire_period')[1] - exp_im = header.table().get('fccd_num_images')[1] - else: #After mid 2017 - exp_t = config['fccd_cam_acquire_time'] - exp_p = config['fccd_cam_acquire_period'] - exp_im = config['fccd_cam_num_images'] - - FCCDexp = namedtuple('FCCDexposure_config', ['exp_time' , 'exp_period', 'num_images']) + exp_t = header.table().get("fccd_acquire_time")[1] + exp_p = header.table().get("fccd_acquire_period")[1] + exp_im = header.table().get("fccd_num_images")[1] + else: # After mid 2017 + exp_t = config["fccd_cam_acquire_time"] + exp_p = config["fccd_cam_acquire_period"] + exp_im = config["fccd_cam_num_images"] + + FCCDexp = namedtuple( + "FCCDexposure_config", ["exp_time", "exp_period", "num_images"] + ) return FCCDexp(exp_t, exp_p, exp_im) + def get_fastccd_pixel_readout(header): - """Returns named tuple of details needed to properly concatenate the fccd images. + """Returns named tuple of details needed to properly concatenate the fccd images. 
Parameters ---------- header : databroker header - + Returns ------- named tuple overscan_cols : int, confgured by timing file for the number of virtual columns for dark current noise rows : int, number of raws for framestore versus nonframestore mode, as instituted by FCCD plugin for EPICS AreaDectector row_offset : int, unused virtual pixels to be removed, as instituted by FCCD plugin for EPICS AreaDectector - + """ - config = header.descriptors[0]['configuration']['fccd']['data'] + config = header.descriptors[0]["configuration"]["fccd"]["data"] try: - overscan_cols = config['fccd_cam_overscan_cols'] #this is hardware config + overscan_cols = config["fccd_cam_overscan_cols"] # this is hardware config except: - overscan_cols = 'unknown' #can code using tiled to infer by Xarray shape; test setting to None + overscan_cols = "unknown" # can code using tiled to infer by Xarray shape; test setting to None try: - rows = config['fccd_fccd1_rows'] - row_offset = config['fccd_fccd1_row_offset'] + rows = config["fccd_fccd1_rows"] + row_offset = config["fccd_fccd1_row_offset"] except: - rows = 'unknown' ##need to rely on hardcoded concatenation ; test setting to None - row_offset = 'unknown' ##need to rely on hardcoded concatenation ; test setting to None - - FCCDconcat = namedtuple('FCCDconcat', ['overscan_cols' , 'rows', 'row_offset']) + rows = ( + "unknown" ##need to rely on hardcoded concatenation ; test setting to None + ) + row_offset = ( + "unknown" ##need to rely on hardcoded concatenation ; test setting to None + ) + + FCCDconcat = namedtuple("FCCDconcat", ["overscan_cols", "rows", "row_offset"]) return FCCDconcat(overscan_cols, rows, row_offset) -def get_fastccd_images_sized(header, dark_headers=None, flat=None, auto_concat = True, auto_overscan=True, return_overscan_array = False, drop_overscan=True): + +def get_fastccd_images_sized( + header, + dark_headers=None, + flat=None, + auto_concat=True, + auto_overscan=True, + return_overscan_array=False, + 
drop_overscan=True, +): """Normalazied images with proper concatenation and overscan data by calling get_fastccd_images Parameters ---------- - light_header : databorker header + light_header : databorker header dark_headers : tuple of 3 databroker headers , optional These headers are the dark images. The tuple should be formed @@ -209,153 +288,198 @@ def get_fastccd_images_sized(header, dark_headers=None, flat=None, auto_concat = flat : array_like Array to use for the flatfield correction. This should be a 2D - array sized as the last two dimensions of the image stack. + array sized as the last two dimensions of the image stack. See csxtools.utilities.get_flatfield() and use plan_name count_flatfield. - + auto_concat : Boolean True to remove un-needed vertical pixels - + auto_overscan : Boolean True to correct images with overscan data and remove overscan data from the array - + return_overscan_array : Boolean False to not return the overscan data as a seperate array (broadcastable) drop_overscan: Boolean - If auto_overscan False, choose to keep or drop the overscan data from + If auto_overscan False, choose to keep or drop the overscan data from the returned data images - - - + + + Returns ------- - images : 4D array (points, frames-per-point, Vpixels, Hpixels) + images : 4D array (points, frames-per-point, Vpixels, Hpixels) Normalized fastccd data. overscan_data : OPTIONAL 4D array (points, frames-per-point, Vpixel, Hpixels) - Extracted overscan data (2 Vpixels for ever 10 Vpixels). + Extracted overscan data (2 Vpixels for ever 10 Vpixels). 
auto_concat_performed : Boolean - + auto_os_drop_performed : Boolean - + auto_os_correct_performed : Boolean - + """ - - - #print('Processing scan {}'.format(header['start']['scan_id'])) + + # print('Processing scan {}'.format(header['start']['scan_id'])) images = get_fastccd_images(header, dark_headers, flat=flat) ###TODO write if statement for image shape if the output is an array (future csxtools upgrade), then there is no need for next 2 lines stack = get_images_to_4D(images) images = stack - total_rows = images.shape[-1] #TODO add to descriptors for image output saving?, but dan must have it somewhere in the handler. + total_rows = images.shape[ + -1 + ] # TODO add to descriptors for image output saving?, but dan must have it somewhere in the handler. fccd_concat_params = get_fastccd_pixel_readout(header) - + #### SEE IF OVERSCAN WAS ENABLED if fccd_concat_params.overscan_cols != 2: images_have_overscan = None - #TODO future elif to look at shape of data (1132 pix, not 960) + # TODO future elif to look at shape of data (1132 pix, not 960) else: - images_have_overscan = True #TODO later, go back and add code later to capture the overscan data - - ### make FCCD images the correct shape (except for overscan) + images_have_overscan = ( + True # TODO later, go back and add code later to capture the overscan data + ) + + ### make FCCD images the correct shape (except for overscan) if auto_concat: - if fccd_concat_params.rows != 'unknown': #goback and change to None when testing - leftstart = fccd_concat_params.row_offset+1 ##TODO make sure it works for non-framestore (is it 'fccd_cam_image_mode'=2?) 
- leftend = fccd_concat_params.rows +fccd_concat_params.row_offset - rightstart = total_rows - fccd_concat_params.row_offset -fccd_concat_params.rows + if ( + fccd_concat_params.rows != "unknown" + ): # goback and change to None when testing + leftstart = ( + fccd_concat_params.row_offset + 1 + ) ##TODO make sure it works for non-framestore (is it 'fccd_cam_image_mode'=2?) + leftend = fccd_concat_params.rows + fccd_concat_params.row_offset + rightstart = ( + total_rows - fccd_concat_params.row_offset - fccd_concat_params.rows + ) rightend = total_rows - fccd_concat_params.row_offset + 1 else: - logging.warning('Concatenating images based on hard-coded values') - #auto_concat = False ## this seems useless. should do soemthing to return that it was hard-code autoconcat - if total_rows > 1001: ##because non-framestore - logging.warning(f'images are larger than 960 pixels (possibly non-FS mode). The first image shape is {images[0,0].shape}') + logging.warning("Concatenating images based on hard-coded values") + # auto_concat = False ## this seems useless. should do soemthing to return that it was hard-code autoconcat + if total_rows > 1001: ##because non-framestore + logging.warning( + f"images are larger than 960 pixels (possibly non-FS mode). The first image shape is {images[0,0].shape}" + ) leftstart = 486 leftend = 966 - rightstart = 1034 - rightend = 1514 + rightstart = 1034 + rightend = 1514 elif total_rows == 1000: leftstart = 7 leftend = 486 - rightstart = 514 - rightend = 995 + rightstart = 514 + rightend = 995 else: - logging.warning(f'images are unexpected size for auto-concatenation. The first image shape is {images[0,0].shape}. ') - auto_concat = False + logging.warning( + f"images are unexpected size for auto-concatenation. The first image shape is {images[0,0].shape}. 
" + ) + auto_concat = False auto_concat_performed = False if auto_concat: - print(leftstart, leftend, rightstart, rightend) #TODO add this to verbose warnings level - images = np.concatenate((images[:,:,:,leftstart : leftend],images[:,:,:, rightstart:rightend]),axis=3) + print( + leftstart, leftend, rightstart, rightend + ) # TODO add this to verbose warnings level + images = np.concatenate( + ( + images[:, :, :, leftstart:leftend], + images[:, :, :, rightstart:rightend], + ), + axis=3, + ) auto_concat_performed = True - + ### if older images, overscan will not be in metadata, but it should be clear from the number of columns (960/10*2)+960=1152 if images.shape[-2] == 1152: - logging.warning(f'Overscan columns (2 per 10) are detected. {images_have_overscan}') - #if images_have_overscan == 'unknown': - logging.warning('Attempting to apply overscan removal') - images_have_overscan = True ###TODO this means we also have to return this - + logging.warning( + f"Overscan columns (2 per 10) are detected. 
{images_have_overscan}" + ) + # if images_have_overscan == 'unknown': + logging.warning("Attempting to apply overscan removal") + images_have_overscan = True ###TODO this means we also have to return this + ### deal with overscan if present if auto_overscan and images_have_overscan: - overscan_data = get_os_correction_images(images) ## this is "broadcastable" with images - print(overscan_data.shape, 'os data returned in same shape as images should be') - images = get_os_dropped_images(np.copy(images)) - print(images.shape, 'os dropped and substracting overscan') + overscan_data = get_os_correction_images( + images + ) ## this is "broadcastable" with images + print(overscan_data.shape, "os data returned in same shape as images should be") + images = get_os_dropped_images(np.copy(images)) + print(images.shape, "os dropped and substracting overscan") auto_os_drop_performed = True images = images - overscan_data auto_os_correct_performed = True elif auto_overscan == False and images_have_overscan and drop_overscan: - images = get_os_dropped_images(np.copy(images)) - print(images.shape,'only dropping os from images') + images = get_os_dropped_images(np.copy(images)) + print(images.shape, "only dropping os from images") auto_os_drop_performed = True auto_os_correct_performed = False elif auto_overscan == False and images_have_overscan and drop_overscan == False: - print(images.shape,'retaining os in returned data images') + print(images.shape, "retaining os in returned data images") auto_os_drop_performed = False auto_os_correct_performed = False else: auto_os_drop_performed = False auto_os_correct_performed = False - + if return_overscan_array: - return images, overscan_data, auto_concat_performed, auto_os_drop_performed, auto_os_correct_performed + return ( + images, + overscan_data, + auto_concat_performed, + auto_os_drop_performed, + auto_os_correct_performed, + ) else: - return images, auto_concat_performed, auto_os_drop_performed, auto_os_correct_performed - + 
return ( + images, + auto_concat_performed, + auto_os_drop_performed, + auto_os_correct_performed, + ) + -def convert_photons(images_input, energy, ADU_930 = 30, quantize_photons = True, make_int_strip_nan= True, round_to_tens=True): - """Convert ADU to photons based on incident beamline energy. FCCD #2 found to be ~30 ADU fro 930eV (ideally 25 ADU). +def convert_photons( + images_input, + energy, + ADU_930=30, + quantize_photons=True, + make_int_strip_nan=True, + round_to_tens=True, +): + """Convert ADU to photons based on incident beamline energy. FCCD #2 found to be ~30 ADU fro 930eV (ideally 25 ADU). Quantized to photons may be problematic in the realm of 4 photon events per pixel. We should add some histogram information. - + Parameters ---------- images_input : numpy array energy : float, incident photon energy - + quantize_photons : rounds pixel values to one's place. returns float or int based on make_int_strip_nan make_int_strip_nan : converts rounded pixel values to integers and then NaNs are very near zero - + Returns ------- images_output : numpy array converted to photons - + #TODO seems to retain nan's need to use a mask to prevent pixels with nan #TODO do more testing to make sure rounding is alway appropriate scheme (or at all) - #TODO it seems that simple rounding creates +/- 4 photon error around "zero" photons + #TODO it seems that simple rounding creates +/- 4 photon error around "zero" photons """ if round_to_tens: - ADUpPH = round(ADU_930*np.nanmean(energy)/930, -1) #TODO should be ok and more consistent, but need to check with energyscans, + ADUpPH = round( + ADU_930 * np.nanmean(energy) / 930, -1 + ) # TODO should be ok and more consistent, but need to check with energyscans, else: - ADUpPH = round(ADU_930*np.nanmean(energy)/930, 2) + ADUpPH = round(ADU_930 * np.nanmean(energy) / 930, 2) images_input = images_input / ADUpPH if quantize_photons == True: if make_int_strip_nan == True: - images_output = np.round(images_input).astype('int') - 
else: + images_output = np.round(images_input).astype("int") + else: images_output = np.round(images_input) - else: + else: images_output = images_input return images_output, energy, ADU_930, ADUpPH diff --git a/csxtools/helpers/overscan.py b/csxtools/helpers/overscan.py index 43f1ab6..2aac801 100644 --- a/csxtools/helpers/overscan.py +++ b/csxtools/helpers/overscan.py @@ -1,24 +1,27 @@ import numpy as np + def _extract_from_fccdwithOS_osdata(images, os_cols, data_cols): - if len(images.shape) !=4: - print(f'Input images should be 4D.') + if len(images.shape) != 4: + print(f"Input images should be 4D.") raise - #print(images.shape) + # print(images.shape) points, frames, total_cols, horz_pix = images.shape - super_cols = int(total_cols / (os_cols+data_cols)) - os_cols_data = np.zeros((os_cols, points, frames, super_cols, horz_pix), ) - - #print(f'{os_cols_data.shape=}') - + super_cols = int(total_cols / (os_cols + data_cols)) + os_cols_data = np.zeros( + (os_cols, points, frames, super_cols, horz_pix), + ) + + # print(f'{os_cols_data.shape=}') for i in range(os_cols): - #print(i) - #print(f'\t{os_cols+data_cols}') - os_cols_data[i] = images[:, :, i::os_cols+data_cols, :] - + # print(i) + # print(f'\t{os_cols+data_cols}') + os_cols_data[i] = images[:, :, i :: os_cols + data_cols, :] + return os_cols_data + # def extract_from_fccdwithOS_photondata(images, os_cols, data_cols): # if len(images.shape) !=4: # print(f'Input images should be 4D.') @@ -29,116 +32,134 @@ def _extract_from_fccdwithOS_osdata(images, os_cols, data_cols): # for i in range(data_cols): # data_cols_data[i] = ar_images[:, :, i+os_cols::os_cols+data_cols, :] - + # return data_cols_data -def _make_os_correction_data(os_data, os_cols, data_cols, images_data_shape, ): - #print(f'{os_data.shape=}') - if len(images_data_shape) !=4 and len(os_data.shape) != 4: - print(f'Input images should be 4D.') + +def _make_os_correction_data( + os_data, + os_cols, + data_cols, + images_data_shape, +): + # 
print(f'{os_data.shape=}') + if len(images_data_shape) != 4 and len(os_data.shape) != 4: + print(f"Input images should be 4D.") raise points, frames, total_cols, horz_pix = images_data_shape - super_cols = int(total_cols / (os_cols+data_cols)) + super_cols = int(total_cols / (os_cols + data_cols)) vert_pix = super_cols * data_cols - - os_data_for_broadcast = np.zeros((points, frames, vert_pix , horz_pix )) - #print(f'{os_data_for_broadcast.shape=}') + + os_data_for_broadcast = np.zeros((points, frames, vert_pix, horz_pix)) + # print(f'{os_data_for_broadcast.shape=}') for i in range(super_cols): - #print(i) - temp = os_data[:,:,i, :].reshape(points, frames, 1, horz_pix) + # print(i) + temp = os_data[:, :, i, :].reshape(points, frames, 1, horz_pix) os_supercol_data = np.broadcast_to(temp, (points, frames, data_cols, horz_pix)) - #print(f'\t{os_supercol_data=}') - #print(f'\t{os_supercol_data.shape=}') - start, stop = i*(data_cols), data_cols*(i+1) - #print(f'\t{start} : {stop}') - os_data_for_broadcast[:,:, start : stop , :] = os_supercol_data - + # print(f'\t{os_supercol_data=}') + # print(f'\t{os_supercol_data.shape=}') + start, stop = i * (data_cols), data_cols * (i + 1) + # print(f'\t{start} : {stop}') + os_data_for_broadcast[:, :, start:stop, :] = os_supercol_data + return os_data_for_broadcast - + + def _drop_os_data(images, os_cols, data_cols): - if len(images.shape) !=4: - print(f'Input images should be 4D.') + if len(images.shape) != 4: + print(f"Input images should be 4D.") raise points, frames, total_cols, horz_pix = images.shape - super_cols = int(total_cols / (os_cols+data_cols)) + super_cols = int(total_cols / (os_cols + data_cols)) vert_pix = super_cols * data_cols - images_no_os = np.zeros(( points, frames, vert_pix, horz_pix) ) - #print(f'{images_no_os.shape=}') - + images_no_os = np.zeros((points, frames, vert_pix, horz_pix)) + # print(f'{images_no_os.shape=}') + for i in range(super_cols): - #print(i) - start_extract, stop_extract = 
i*(data_cols+os_cols)+os_cols, (data_cols+os_cols)*(i+1)#+os_cols - #print(f'\tOUT OF {start_extract}:{stop_extract}') - temp = images[:,:,start_extract:stop_extract, :] - #print(f'\t{temp.shape}') - start_in, stop_in = i*data_cols, i*data_cols+data_cols - #print(f'\tINTO {start_in}:{stop_in}') - #target = images_no_os[:,:, start_in : stop_in , :] - #print(f'\t{target.shape}') - images_no_os[:,:, start_in : stop_in , :] = temp - - #print(f'{images_no_os.shape=}') - + # print(i) + start_extract, stop_extract = i * (data_cols + os_cols) + os_cols, ( + data_cols + os_cols + ) * ( + i + 1 + ) # +os_cols + # print(f'\tOUT OF {start_extract}:{stop_extract}') + temp = images[:, :, start_extract:stop_extract, :] + # print(f'\t{temp.shape}') + start_in, stop_in = i * data_cols, i * data_cols + data_cols + # print(f'\tINTO {start_in}:{stop_in}') + # target = images_no_os[:,:, start_in : stop_in , :] + # print(f'\t{target.shape}') + images_no_os[:, :, start_in:stop_in, :] = temp + + # print(f'{images_no_os.shape=}') + return images_no_os + def _make_left_right(images): horz_pix = images.shape[-1] - imgs_left = np.flip(np.copy(images[:,:,:,0:int(horz_pix/2)])) - imgs_right = np.copy(images[:,:,:,int(horz_pix/2):horz_pix]) - + imgs_left = np.flip(np.copy(images[:, :, :, 0 : int(horz_pix / 2)])) + imgs_right = np.copy(images[:, :, :, int(horz_pix / 2) : horz_pix]) + return imgs_left, imgs_right -#def _make_whole_from_left_right(images_left, images_right): + +# def _make_whole_from_left_right(images_left, images_right): # images = np.concatenate((np.flip(images_left), images_right), axis=-1) - -def get_os_correction_images(images, os_cols=2, data_cols=10, os_mean=True, os_single_col=None): - if os_mean == 'False' and os_single_col is None: - print('select nth column if not using mean') +def get_os_correction_images( + images, os_cols=2, data_cols=10, os_mean=True, os_single_col=None +): + + if os_mean == "False" and os_single_col is None: + print("select nth column if not using 
mean") raise - - images_left, images_right = _make_left_right(images) - #print(images_left.shape, images_right.shape) - + + images_left, images_right = _make_left_right(images) + # print(images_left.shape, images_right.shape) + os_extract_left = _extract_from_fccdwithOS_osdata(images_left, os_cols, data_cols) os_extract_right = _extract_from_fccdwithOS_osdata(images_left, os_cols, data_cols) - - #print(os_extract_left.shape, os_extract_right.shape) + + # print(os_extract_left.shape, os_extract_right.shape) if os_mean: - os_imgs_left = _make_os_correction_data(np.mean(os_extract_left, axis=0), - os_cols, data_cols, images_left.shape ) - os_imgs_right = _make_os_correction_data(np.mean(os_extract_right, axis=0), - os_cols, data_cols, images_right.shape ) + os_imgs_left = _make_os_correction_data( + np.mean(os_extract_left, axis=0), os_cols, data_cols, images_left.shape + ) + os_imgs_right = _make_os_correction_data( + np.mean(os_extract_right, axis=0), os_cols, data_cols, images_right.shape + ) else: - os_imgs_left = _make_os_correction_data(os_extract_left[os_single_col], - os_cols, data_cols, images_left.shape ) - os_single_col = int(not os_single_col )#preserving readout order, not location in flipped array - os_imgs_right = _make_os_correction_data(s_extract_right[os_single_col ], - os_cols, data_cols, images_right.shape ) - - #print(os_imgs_left.shape, os_imgs_right.shape) + os_imgs_left = _make_os_correction_data( + os_extract_left[os_single_col], os_cols, data_cols, images_left.shape + ) + os_single_col = int( + not os_single_col + ) # preserving readout order, not location in flipped array + os_imgs_right = _make_os_correction_data( + s_extract_right[os_single_col], os_cols, data_cols, images_right.shape + ) + + # print(os_imgs_left.shape, os_imgs_right.shape) os_imgs = np.concatenate((np.flip(os_imgs_left), os_imgs_right), axis=-1) - - #print(os_imgs.shape) - + + # print(os_imgs.shape) + return os_imgs - - + + def get_os_dropped_images(images, os_cols=2, 
data_cols=10): - imgs_left, imgs_right = _make_left_right(images) - + imgs_left, imgs_right = _make_left_right(images) + imgs_left_no_os = _drop_os_data(imgs_left, os_cols, data_cols) imgs_right_no_os = _drop_os_data(imgs_right, os_cols, data_cols) - - #print(f'{imgs_left_no_os.shape=}') - + + # print(f'{imgs_left_no_os.shape=}') + images = np.concatenate((np.flip(imgs_left_no_os), imgs_right_no_os), axis=-1) - #images = _make_whole_from_left_right(imgs_left_no_os, imgs_right_no_os) - #print(f'{images.shape=}') - + # images = _make_whole_from_left_right(imgs_left_no_os, imgs_right_no_os) + # print(f'{images.shape=}') + return images - - - diff --git a/csxtools/image/__init__.py b/csxtools/image/__init__.py index a7d76f6..873e098 100644 --- a/csxtools/image/__init__.py +++ b/csxtools/image/__init__.py @@ -1,11 +1,27 @@ from .transform import rotate90 -from .stack import (stackmean, stacksum, stackvar, stackstderr, stackstd, - images_mean, images_sum) +from .stack import ( + stackmean, + stacksum, + stackvar, + stackstderr, + stackstd, + images_mean, + images_sum, +) -__all__ = ['rotate90', 'stackmean', 'stacksum', 'stackvar', 'stackstderr', - 'stackstd', 'images_mean', 'images_sum'] +__all__ = [ + "rotate90", + "stackmean", + "stacksum", + "stackvar", + "stackstderr", + "stackstd", + "images_mean", + "images_sum", +] # set version string using versioneer from .._version import get_versions -__version__ = get_versions()['version'] + +__version__ = get_versions()["version"] del get_versions diff --git a/csxtools/image/stack.py b/csxtools/image/stack.py index 050c551..7e4b87c 100644 --- a/csxtools/image/stack.py +++ b/csxtools/image/stack.py @@ -2,6 +2,7 @@ from ..ext import image as extimage import logging + logger = logging.getLogger(__name__) diff --git a/csxtools/image/transform.py b/csxtools/image/transform.py index 52ae75c..0751688 100644 --- a/csxtools/image/transform.py +++ b/csxtools/image/transform.py @@ -1,7 +1,7 @@ from ..ext import image as extimage -def 
rotate90(a, sense='ccw'): +def rotate90(a, sense="ccw"): """Rotate a stack of images by 90 degrees This routine rotates a stack of images by 90. The rotation is performed @@ -22,9 +22,9 @@ def rotate90(a, sense='ccw'): """ - if sense == 'ccw': + if sense == "ccw": sense = 1 - elif sense == 'cw': + elif sense == "cw": sense = 0 else: raise ValueError("sense must be 'cw' or 'ccw'") diff --git a/csxtools/image_corr.py b/csxtools/image_corr.py index de4efae..2302152 100644 --- a/csxtools/image_corr.py +++ b/csxtools/image_corr.py @@ -7,37 +7,41 @@ def correct_events(evs, data_key, dark_images, drop_raw=False): - out_data_key = data_key + '_corrected' + out_data_key = data_key + "_corrected" ev0 = next(evs) - new_desc = dict(ev0['descriptor']) - new_desc['data_keys'][out_data_key] = dict(new_desc['data_keys'][data_key]) - new_desc['data_keys'][out_data_key]['source'] = 'subtract_background' - new_desc['uid'] = uuid.uuid4() + new_desc = dict(ev0["descriptor"]) + new_desc["data_keys"][out_data_key] = dict(new_desc["data_keys"][data_key]) + new_desc["data_keys"][out_data_key]["source"] = "subtract_background" + new_desc["uid"] = uuid.uuid4() if drop_raw: - new_desc['data_keys'].pop(data_key) - for ev in chain((ev0, ), evs): - new_ev = {'uid': str(uuid.uuid4()), - 'time': ttime.time(), - 'descriptor': new_desc, - 'seq_no': ev['seq_no'], - 'data': dict(ev['data']), - 'timestamps': dict(ev['timestamps'])} - corr, gain_img = subtract_background(ev['data'][data_key], dark_images) # noqa F821 TODO - new_ev['data'][out_data_key] = corr - new_ev['timestamps'][out_data_key] = ttime.time() + new_desc["data_keys"].pop(data_key) + for ev in chain((ev0,), evs): + new_ev = { + "uid": str(uuid.uuid4()), + "time": ttime.time(), + "descriptor": new_desc, + "seq_no": ev["seq_no"], + "data": dict(ev["data"]), + "timestamps": dict(ev["timestamps"]), + } + corr, gain_img = subtract_background( + ev["data"][data_key], dark_images + ) # noqa F821 TODO + new_ev["data"][out_data_key] = corr + 
new_ev["timestamps"][out_data_key] = ttime.time() if drop_raw: - new_ev['data'].pop(data_key) - new_ev['timestamps'].pop(data_key) + new_ev["data"].pop(data_key) + new_ev["timestamps"].pop(data_key) yield new_ev def clean_images(header, pivot_key, timesource_key, dark_images=None, static_keys=None): if static_keys is None: - static_keys = ['sx', 'sy', 'temp_a', 'temp_b', 'sz'] + static_keys = ["sx", "sy", "temp_a", "temp_b", "sz"] # sort out which descriptor has the key we want to pivot on - pv_desc = [d for d in header['descriptors'] if pivot_key in d['data_keys']][0] + pv_desc = [d for d in header["descriptors"] if pivot_key in d["data_keys"]][0] # sort out which descriptor has the key that we want to zip with to get time stamps - ts_desc = [d for d in header['descriptors'] if timesource_key in d['data_keys']][0] + ts_desc = [d for d in header["descriptors"] if timesource_key in d["data_keys"]][0] ts_events = get_events_generator(ts_desc) pv_events = get_events_generator(pv_desc) @@ -53,9 +57,9 @@ def clean_images(header, pivot_key, timesource_key, dark_images=None, static_key def extract_darkfield(header, dark_key): - cam_desc = [d for d in header['descriptors'] if dark_key in d['data_keys']][0] + cam_desc = [d for d in header["descriptors"] if dark_key in d["data_keys"]][0] events = get_events_generator(cam_desc) events = list(((ev, fill_event(ev))[0] for ev in events)) event = events[0] - ims = (event['data'][dark_key] << 2) >> 2 + ims = (event["data"][dark_key] << 2) >> 2 return ims.mean(axis=0) diff --git a/csxtools/ipynb/__init__.py b/csxtools/ipynb/__init__.py index 2e07fc5..2b3a880 100644 --- a/csxtools/ipynb/__init__.py +++ b/csxtools/ipynb/__init__.py @@ -3,8 +3,8 @@ # set version string using versioneer from .._version import get_versions -__version__ = get_versions()['version'] + +__version__ = get_versions()["version"] del get_versions -__all__ = ['image_stack_to_movie', 'show_image_stack', - 'notebook_to_nbviewer'] +__all__ = 
["image_stack_to_movie", "show_image_stack", "notebook_to_nbviewer"] diff --git a/csxtools/ipynb/animation.py b/csxtools/ipynb/animation.py index fc34ceb..32e9ee4 100644 --- a/csxtools/ipynb/animation.py +++ b/csxtools/ipynb/animation.py @@ -6,8 +6,14 @@ import base64 -def show_image_stack(images, minmax, fontsize=18, cmap='CMRmap', - zlabel=r'Intensty [ADU]', figsize=(12, 10)): +def show_image_stack( + images, + minmax, + fontsize=18, + cmap="CMRmap", + zlabel=r"Intensty [ADU]", + figsize=(12, 10), +): """Show an Interactive Image Stack in an IPython Notebook Parameters @@ -35,28 +41,34 @@ def view_frame(i, vmin, vmax): fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) - im = ax.imshow(images[i], cmap=cmap, interpolation='none', - vmin=vmin, vmax=vmax) + im = ax.imshow(images[i], cmap=cmap, interpolation="none", vmin=vmin, vmax=vmax) cbar = fig.colorbar(im) cbar.ax.tick_params(labelsize=fontsize) - cbar.set_label(zlabel, size=fontsize, weight='bold') - - ax.set_title('Frame {} Min = {} Max = {}'.format(i, vmin, vmax), - fontsize=fontsize, fontweight='bold') - - for item in ([ax.xaxis.label, ax.yaxis.label] + - ax.get_xticklabels() + ax.get_yticklabels()): + cbar.set_label(zlabel, size=fontsize, weight="bold") + + ax.set_title( + "Frame {} Min = {} Max = {}".format(i, vmin, vmax), + fontsize=fontsize, + fontweight="bold", + ) + + for item in ( + [ax.xaxis.label, ax.yaxis.label] + + ax.get_xticklabels() + + ax.get_yticklabels() + ): item.set_fontsize(fontsize) - item.set_fontweight('bold') + item.set_fontweight("bold") plt.show() - interact(view_frame, i=(0, n-1), vmin=minmax, vmax=minmax) + interact(view_frame, i=(0, n - 1), vmin=minmax, vmax=minmax) -def image_stack_to_movie(images, frames=None, vmin=None, vmax=None, - figsize=(6, 5), cmap='CMRmap', fps=10): +def image_stack_to_movie( + images, frames=None, vmin=None, vmax=None, figsize=(6, 5), cmap="CMRmap", fps=10 +): """Convert image stack to movie and show in notebook. 
Parameters @@ -82,23 +94,25 @@ def image_stack_to_movie(images, frames=None, vmin=None, vmax=None, fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) - im = plt.imshow(images[1], vmin=vmin, vmax=vmax, cmap=cmap, - interpolation='none') + im = plt.imshow(images[1], vmin=vmin, vmax=vmax, cmap=cmap, interpolation="none") cbar = fig.colorbar(im) cbar.ax.tick_params(labelsize=14) - cbar.set_label(r"Intensity [ADU]", size=14,) - for item in ([ax.xaxis.label, ax.yaxis.label] + - ax.get_xticklabels() + ax.get_yticklabels()): + cbar.set_label( + r"Intensity [ADU]", + size=14, + ) + for item in ( + [ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels() + ): item.set_fontsize(14) - item.set_fontweight('bold') + item.set_fontweight("bold") def animate(i): im.set_array(images[i]) - ax.set_title('Frame {}'.format(i), fontsize=16, fontweight='bold') - return im, + ax.set_title("Frame {}".format(i), fontsize=16, fontweight="bold") + return (im,) - anim = animation.FuncAnimation(fig, animate, frames=frames, - interval=1, blit=True) + anim = animation.FuncAnimation(fig, animate, frames=frames, interval=1, blit=True) plt.close(anim._fig) # return anim.to_html5_video() return HTML(_anim_to_html(anim, fps)) @@ -110,11 +124,13 @@ def _anim_to_html(anim, fps): Your browser does not support the video tag. 
""" - if not hasattr(anim, '_encoded_video'): - with NamedTemporaryFile(suffix='.mp4') as f: - anim.save(f.name, fps=fps, - extra_args=['-vcodec', 'libx264', - '-pix_fmt', 'yuv420p']) + if not hasattr(anim, "_encoded_video"): + with NamedTemporaryFile(suffix=".mp4") as f: + anim.save( + f.name, + fps=fps, + extra_args=["-vcodec", "libx264", "-pix_fmt", "yuv420p"], + ) video = open(f.name, "rb").read() anim._encoded_video = base64.b64encode(video) return VIDEO_TAG.format(anim._encoded_video.decode("utf-8")) diff --git a/csxtools/ipynb/nbviewer.py b/csxtools/ipynb/nbviewer.py index 0bfc33d..3fe811f 100644 --- a/csxtools/ipynb/nbviewer.py +++ b/csxtools/ipynb/nbviewer.py @@ -32,6 +32,6 @@ def notebook_to_nbviewer(): js = _js_callback_open + _js html = '' - html += 'nbviewer will open in a new tab in 20 seconds .....' + html += "" + html += "nbviewer will open in a new tab in 20 seconds ....." return display(HTML(html)) diff --git a/csxtools/plotting.py b/csxtools/plotting.py index 83bd09e..88b8bb1 100644 --- a/csxtools/plotting.py +++ b/csxtools/plotting.py @@ -1,12 +1,12 @@ import numpy as np from matplotlib import pyplot as plt -golden_mean = (np.sqrt(5)-1.0)/2.0 +golden_mean = (np.sqrt(5) - 1.0) / 2.0 -def make_panel_plot(n, fig=None, - xlmargin=0.15, ytmargin=0.10, - xrmargin=0.05, ybmargin=0.10): +def make_panel_plot( + n, fig=None, xlmargin=0.15, ytmargin=0.10, xrmargin=0.05, ybmargin=0.10 +): """Make a multi panel plot using matplotlib This function, makes a typical panel plot and returns a list @@ -33,8 +33,8 @@ def make_panel_plot(n, fig=None, if fig is None: fig = plt.figure(figsize=[6, 6 * golden_mean * n]) - xsize = (1. - (xlmargin + xrmargin)) - ysize = (1. 
- (ybmargin + ytmargin)) / n + xsize = 1.0 - (xlmargin + xrmargin) + ysize = (1.0 - (ybmargin + ytmargin)) / n pos = np.array([xlmargin, ybmargin, xsize, ysize]) diff --git a/csxtools/settings.py b/csxtools/settings.py index d2e9c2a..2299c22 100644 --- a/csxtools/settings.py +++ b/csxtools/settings.py @@ -1,4 +1,4 @@ detectors = {} -detectors['fccd'] = 'fccd_image' -detectors['axis1'] = 'axis1_image' -diff_angles = ['delta', 'theta', 'gamma', None, None, None] +detectors["fccd"] = "fccd_image" +detectors["axis1"] = "axis1_image" +diff_angles = ["delta", "theta", "gamma", None, None, None] diff --git a/csxtools/utils.py b/csxtools/utils.py index 513a77f..aa5f56c 100644 --- a/csxtools/utils.py +++ b/csxtools/utils.py @@ -8,10 +8,13 @@ from databroker.assets.handlers import AreaDetectorHDF5TimestampHandler import logging + logger = logging.getLogger(__name__) -def get_fastccd_images(light_header, dark_headers=None, - flat=None, gain=(1, 4, 8), tag=None, roi=None): + +def get_fastccd_images( + light_header, dark_headers=None, flat=None, gain=(1, 4, 8), tag=None, roi=None +): """Retreive and correct FastCCD Images from associated headers Retrieve FastCCD Images from databroker and correct for: @@ -57,7 +60,7 @@ def get_fastccd_images(light_header, dark_headers=None, """ if tag is None: - tag = detectors['fccd'] + tag = detectors["fccd"] # Now lets sort out the ROI if roi is not None: @@ -72,8 +75,9 @@ def get_fastccd_images(light_header, dark_headers=None, logger.warning("Processing without dark images") else: if dark_headers[0] is None: - raise NotImplementedError("Use of header metadata to find dark" - " images is not implemented yet.") + raise NotImplementedError( + "Use of header metadata to find dark" " images is not implemented yet." 
+ ) # Read the images for the dark headers t = ttime.time() @@ -91,25 +95,20 @@ def get_fastccd_images(light_header, dark_headers=None, tt = ttime.time() b = bgnd_events.astype(dtype=np.uint16) - logger.info("Image conversion took %.3f seconds", - ttime.time() - tt) + logger.info("Image conversion took %.3f seconds", ttime.time() - tt) b = correct_images(b, gain=(1, 1, 1)) tt = ttime.time() b = stackmean(b) - logger.info("Mean of image stack took %.3f seconds", - ttime.time() - tt) + logger.info("Mean of image stack took %.3f seconds", ttime.time() - tt) else: - if (i == 0): - logger.warning("Missing dark image" - " for gain setting 8") - elif (i == 1): - logger.warning("Missing dark image" - " for gain setting 2") - elif (i == 2): - logger.warning("Missing dark image" - " for gain setting 1") + if i == 0: + logger.warning("Missing dark image" " for gain setting 8") + elif i == 1: + logger.warning("Missing dark image" " for gain setting 2") + elif i == 2: + logger.warning("Missing dark image" " for gain setting 1") dark.append(b) @@ -127,16 +126,11 @@ def get_fastccd_images(light_header, dark_headers=None, return _correct_fccd_images(events, bgnd, flat, gain) -def get_axis1_images(light_header, dark_header=None, - flat=None, tag=None, roi=None): - flipped_image = _get_axis1_images(light_header, dark_header, flat, tag, roi) - return flipped_image[...,::-1] - -def _get_axis1_images(light_header, dark_header=None, - flat=None, tag=None, roi=None): - """Retreive and correct FastCCD Images from associated headers - Retrieve FastCCD Images from databroker and correct for: +def get_axis1_images(light_header, dark_header=None, flat=None, tag=None, roi=None): + """Retreive and correct AXIS1 Images from associated headers + + Retrieve AXIS1 Images from databroker and correct for: - Bad Pixels (converted to ``np.nan``) - Backgorund. 
@@ -149,7 +143,7 @@ def _get_axis1_images(light_header, dark_header=None, This header defines the images to convert dark_headers : databroker headers , optional - The header is the dark images. + The header is the dark images. flat : array_like Array to use for the flatfield correction. This should be a 2D @@ -170,9 +164,14 @@ def _get_axis1_images(light_header, dark_header=None, dask.array : corrected images """ + flipped_image = _get_axis1_images(light_header, dark_header, flat, tag, roi) + return flipped_image[..., ::-1] + + +def _get_axis1_images(light_header, dark_header=None, flat=None, tag=None, roi=None): if tag is None: - tag = detectors['axis1'] + tag = detectors["axis1"] # Now lets sort out the ROI if roi is not None: @@ -190,17 +189,15 @@ def _get_axis1_images(light_header, dark_header=None, # Read the images for the dark headers t = ttime.time() - d = dark_header + d = dark_header bgnd_events = _get_images(d, tag, roi) tt = ttime.time() b = bgnd_events.astype(dtype=np.uint16) - logger.info("Image conversion took %.3f seconds", - ttime.time() - tt) + logger.info("Image conversion took %.3f seconds", ttime.time() - tt) tt = ttime.time() b = stackmean(b) - logger.info("Mean of image stack took %.3f seconds", - ttime.time() - tt) + logger.info("Mean of image stack took %.3f seconds", ttime.time() - tt) bgnd = np.array(b) @@ -236,8 +233,7 @@ def get_images_to_4D(images, dtype=None): >>> a = get_images_to_4D(images, dtype=np.float32) """ - im = np.array([np.asarray(im, dtype=dtype) for im in images], - dtype=dtype) + im = np.array([np.asarray(im, dtype=dtype) for im in images], dtype=dtype) return im @@ -271,14 +267,16 @@ def _get_images(header, tag, roi=None): images = _crop_images(images, roi) return images + def _correct_fccd_images(image, bgnd, flat, gain): image = correct_images(image, bgnd, flat, gain) - image = rotate90(image, 'cw') + image = rotate90(image, "cw") return image + def _correct_axis_images(image, bgnd, flat): image = 
correct_images_axis(image, bgnd, flat) - image = rotate90(image, 'cw') + image = rotate90(image, "cw") return image @@ -289,10 +287,11 @@ def _crop_images(image, roi): def _crop(image, roi): image_shape = image.shape # Assuming ROI is specified in the "rotated" (correct) orientation - roi = [image_shape[-2]-roi[3], roi[0], image_shape[-1]-roi[1], roi[2]] - return image.T[roi[1]:roi[3], roi[0]:roi[2]].T + roi = [image_shape[-2] - roi[3], roi[0], image_shape[-1] - roi[1], roi[2]] + return image.T[roi[1] : roi[3], roi[0] : roi[2]].T + -def get_fastccd_timestamps(header, tag='fccd_image'): +def get_fastccd_timestamps(header, tag="fccd_image"): """Return the FastCCD timestamps from the Areadetector Data File Return a list of numpy arrays of the timestamps for the images as @@ -310,14 +309,14 @@ def get_fastccd_timestamps(header, tag='fccd_image'): list of arrays of the timestamps """ - with header.db.reg.handler_context( - {'AD_HDF5': AreaDetectorHDF5TimestampHandler}): + with header.db.reg.handler_context({"AD_HDF5": AreaDetectorHDF5TimestampHandler}): timestamps = list(header.data(tag)) return timestamps -def get_axis1_timestamps(header, tag='axis1_image'): - """Return the FastCCD timestamps from the Areadetector Data File + +def get_axis1_timestamps(header, tag="axis1_image"): + """Return the AXIS1 timestamps from the Areadetector Data File Return a list of numpy arrays of the timestamps for the images as recorded in the datafile. 
@@ -334,12 +333,12 @@ def get_axis1_timestamps(header, tag='axis1_image'): list of arrays of the timestamps """ - with header.db.reg.handler_context( - {'AD_HDF5': AreaDetectorHDF5TimestampHandler}): + with header.db.reg.handler_context({"AD_HDF5": AreaDetectorHDF5TimestampHandler}): timestamps = list(header.data(tag)) return timestamps + def calculate_flatfield(image, limits=(0.6, 1.4)): """Calculate a flatfield from fluo data @@ -373,8 +372,11 @@ def calculate_flatfield(image, limits=(0.6, 1.4)): return flat -def get_fastccd_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interval=False): - """Calculate a flatfield from two headers + +def get_fastccd_flatfield( + light, dark, flat=None, limits=(0.6, 1.4), half_interval=False +): + """Calculate a flatfield from two headers This routine calculates the flatfield using the :func:calculate_flatfield() function after obtaining the images from @@ -391,7 +393,7 @@ def get_fastccd_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interv limits : tuple limits used for returning corrected pixel flatfield The tuple setting lower and upper bound. np.nan returned value is outside bounds half_interval : boolean or tuple to perform calculation for only half of the FastCCD - Default is False. If True, then the hard-code portion is retained. Customize image + Default is False. If True, then the hard-code portion is retained. Customize image manipulation using a tuple of length 2 for (row_start, row_stop). 
@@ -404,7 +406,7 @@ def get_fastccd_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interv images = stackmean(images) if half_interval: if isinstance(half_interval, bool): - row_start, row_stop = (7, 486) #hard coded for the broken half of the fccd + row_start, row_stop = (7, 486) # hard coded for the broken half of the fccd else: row_start, row_stop = half_interval print(row_start, row_stop) @@ -412,12 +414,15 @@ def get_fastccd_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interv flat = calculate_flatfield(images, limits) removed = np.sum(np.isnan(flat)) if removed != 0: - logger.warning("Flatfield correction removed %d pixels (%.2f %%)" % - (removed, removed * 100 / flat.size)) + logger.warning( + "Flatfield correction removed %d pixels (%.2f %%)" + % (removed, removed * 100 / flat.size) + ) return flat + def get_axis1_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interval=False): - """Calculate a flatfield from two headers + """Calculate a flatfield from two headers This routine calculates the flatfield using the :func:calculate_flatfield() function after obtaining the images from @@ -434,7 +439,7 @@ def get_axis1_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interval limits : tuple limits used for returning corrected pixel flatfield The tuple setting lower and upper bound. np.nan returned value is outside bounds half_interval : boolean or tuple to perform calculation for only half of the FastCCD - Default is False. If True, then the hard-code portion is retained. Customize image + Default is False. If True, then the hard-code portion is retained. Customize image manipulation using a tuple of length 2 for (row_start, row_stop). 
@@ -447,7 +452,7 @@ def get_axis1_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interval images = stackmean(images) if half_interval: if isinstance(half_interval, bool): - row_start, row_stop = (7, 486) #hard coded for the broken half of the fccd + row_start, row_stop = (7, 486) # hard coded for the broken half of the fccd else: row_start, row_stop = half_interval print(row_start, row_stop) @@ -455,10 +460,13 @@ def get_axis1_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interval flat = calculate_flatfield(images, limits) removed = np.sum(np.isnan(flat)) if removed != 0: - logger.warning("Flatfield correction removed %d pixels (%.2f %%)" % - (removed, removed * 100 / flat.size)) + logger.warning( + "Flatfield correction removed %d pixels (%.2f %%)" + % (removed, removed * 100 / flat.size) + ) return flat + def fccd_mask(): """Return the initial flatfield mask for the FastCCD @@ -474,8 +482,9 @@ def fccd_mask(): return flat + def axis_mask(): - """Return the initial flatfield mask for the FastCCD + """Return the initial flatfield mask for the AXIS1 Returns ------- diff --git a/doc/conf.py b/doc/conf.py index 2770aa9..a015278 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -21,57 +21,57 @@ # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) +# sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' +# needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.doctest', - 'sphinx.ext.intersphinx', - 'sphinx.ext.todo', - 'sphinx.ext.coverage', - 'sphinx.ext.mathjax', - 'sphinx.ext.ifconfig', - 'sphinx.ext.viewcode', - 'sphinx.ext.napoleon', - 'IPython.sphinxext.ipython_console_highlighting', - 'IPython.sphinxext.ipython_directive', + "sphinx.ext.autodoc", + "sphinx.ext.doctest", + "sphinx.ext.intersphinx", + "sphinx.ext.todo", + "sphinx.ext.coverage", + "sphinx.ext.mathjax", + "sphinx.ext.ifconfig", + "sphinx.ext.viewcode", + "sphinx.ext.napoleon", + "IPython.sphinxext.ipython_console_highlighting", + "IPython.sphinxext.ipython_directive", ] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The encoding of source files. -#source_encoding = 'utf-8-sig' +# source_encoding = 'utf-8-sig' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = 'csxtools' -copyright = '2015, Brookhaven Science Associates, Brookhaven National Laboratory' -author = 'Brookhaven Science Associates, Brookhaven National Laboratory' +project = "csxtools" +copyright = "2015, Brookhaven Science Associates, Brookhaven National Laboratory" +author = "Brookhaven Science Associates, Brookhaven National Laboratory" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = '0.1' +version = "0.1" # The full version, including alpha/beta/rc tags. -release = '0.1.0' +release = "0.1.0" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
@@ -82,37 +82,37 @@ # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build'] +exclude_patterns = ["_build"] # The reST default role (used for this markup: `text`) to use for all # documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] +# modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False +# keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True @@ -122,160 +122,161 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'bootstrap' +html_theme = "bootstrap" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
-html_theme_options = {'source_link_position': "footer", - 'navbar_sidebarrel': False, - 'bootstrap_version': "3", - 'bootswatch_theme': "united"} +html_theme_options = { + "source_link_position": "footer", + "navbar_sidebarrel": False, + "bootstrap_version": "3", + "bootswatch_theme": "united", +} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = sphinx_bootstrap_theme.get_html_theme_path() # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. -#html_extra_path = [] +# html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. 
html_sidebars = {} -#html_sidebars = {'**': ['localtoc.html', 'sourcelink.html', 'searchbox.html']} +# html_sidebars = {'**': ['localtoc.html', 'sourcelink.html', 'searchbox.html']} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_domain_indices = True +# html_domain_indices = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True +# html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True +# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None +# html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr' -#html_search_language = 'en' +# html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value -#html_search_options = {'type': 'default'} +# html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. 
If empty, the default will be used. -#html_search_scorer = 'scorer.js' +# html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. -htmlhelp_basename = 'csxtoolsdoc' +htmlhelp_basename = "csxtoolsdoc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', - -# Latex figure (float) alignment -#'figure_align': 'htbp', + # The paper size ('letterpaper' or 'a4paper'). + #'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + #'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + #'preamble': '', + # Latex figure (float) alignment + #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'csxtools.tex', 'csxtools Documentation', - 'Brookhaven Science Associates, Brookhaven National Laboratory', 'manual'), + ( + master_doc, + "csxtools.tex", + "csxtools Documentation", + "Brookhaven Science Associates, Brookhaven National Laboratory", + "manual", + ), ] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # If true, show page references after internal links. -#latex_show_pagerefs = False +# latex_show_pagerefs = False # If true, show URL addresses after external links. -#latex_show_urls = False +# latex_show_urls = False # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. 
-#latex_domain_indices = True +# latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'csxtools', 'csxtools Documentation', - [author], 1) -] +man_pages = [(master_doc, "csxtools", "csxtools Documentation", [author], 1)] # If true, show URL addresses after external links. -#man_show_urls = False +# man_show_urls = False # -- Options for Texinfo output ------------------------------------------- @@ -284,23 +285,29 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'csxtools', 'csxtools Documentation', - author, 'csxtools', 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + "csxtools", + "csxtools Documentation", + author, + "csxtools", + "One line description of project.", + "Miscellaneous", + ), ] # Documents to append as an appendix to all manuals. -#texinfo_appendices = [] +# texinfo_appendices = [] # If false, no module index is generated. -#texinfo_domain_indices = True +# texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' +# texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. -#texinfo_no_detailmenu = False +# texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. 
-intersphinx_mapping = {'https://docs.python.org/': None} +intersphinx_mapping = {"https://docs.python.org/": None} diff --git a/run_tests.py b/run_tests.py index 41c2499..81e8997 100644 --- a/run_tests.py +++ b/run_tests.py @@ -1,4 +1,5 @@ if __name__ == "__main__": import pytest import sys + sys.exit(pytest.main()) diff --git a/setup.py b/setup.py index e9ba792..bfc2ffd 100644 --- a/setup.py +++ b/setup.py @@ -9,13 +9,15 @@ from setuptools.command.build_ext import build_ext # Import build_ext import versioneer + # Custom build_ext to remove cpython-XX suffix class CustomBuildExt(build_ext): def get_ext_filename(self, ext_name): # Default filename: fastccd.cpython-38-x86_64-linux-gnu.so filename = super().get_ext_filename(ext_name) # Strip platform-specific suffix: fastccd.so - return filename.split('.')[0] + '.so' + return filename.split(".")[0] + ".so" + min_version = (3, 8) if sys.version_info < min_version: @@ -61,7 +63,10 @@ def get_ext_filename(self, ext_name): ) image = Extension( - "image", sources=["src/imagemodule.c", "src/image.c"], extra_compile_args=["-fopenmp"], extra_link_args=["-lgomp"] + "image", + sources=["src/imagemodule.c", "src/image.c"], + extra_compile_args=["-fopenmp"], + extra_link_args=["-lgomp"], ) phocount = Extension( @@ -73,20 +78,20 @@ def get_ext_filename(self, ext_name): setup( name="csxtools", version=versioneer.get_version(), - #cmdclass=versioneer.get_cmdclass(), + # cmdclass=versioneer.get_cmdclass(), cmdclass={ **versioneer.get_cmdclass(), - 'build_ext': CustomBuildExt, # Use the custom build_ext + "build_ext": CustomBuildExt, # Use the custom build_ext }, author="Brookhaven National Laboratory", description="Python library for tools to be used at the Coherent Soft X-ray scattering (CSX) beamline at NSLS-II.", packages=setuptools.find_packages(exclude=["src", "tests"]), python_requires=">={}".format(".".join(str(n) for n in min_version)), long_description=readme, - long_description_content_type='text/markdown', + 
long_description_content_type="text/markdown", ext_package="csxtools.ext", include_dirs=[np.get_include()], - #ext_modules=[fastccd, image, phocount], + # ext_modules=[fastccd, image, phocount], ext_modules=[fastccd, axis1, image, phocount], tests_require=["pytest"], install_requires=requirements, diff --git a/src/axis1module.c b/src/axis1module.c index bc970ba..0ae92f0 100644 --- a/src/axis1module.c +++ b/src/axis1module.c @@ -123,29 +123,23 @@ static PyObject* axis1_correct_images(PyObject *self, PyObject *args){ return NULL; } -//static PyMethodDef FastCCDMethods[] = { -// { "correct_images", fastccd_correct_images, METH_VARARGS, static PyMethodDef AXIS1_Methods[] = { { "correct_images_axis", axis1_correct_images, METH_VARARGS, "Correct AXIS1 Images"}, {NULL, NULL, 0, NULL} }; -//static struct PyModuleDef fastccdmodule = { static struct PyModuleDef axis1module = { PyModuleDef_HEAD_INIT, - //"fastccd", /* name of module */ - "axis1", /* name of module */ + "axis1", /* name of module */ NULL, /* module documentation, may be NULL */ -1, /* size of per-interpreter state of the module, or -1 if the module keeps state in global variables. 
*/ AXIS1_Methods }; -//PyMODINIT_FUNC PyInit_fastccd(void) { PyMODINIT_FUNC PyInit_axis1(void) { PyObject *m; - //m = PyModule_Create(&fastccdmodule); m = PyModule_Create(&axis1module); if(m == NULL){ return NULL; diff --git a/tests/test_fastccd.py b/tests/test_fastccd.py index 7d4b496..e581683 100644 --- a/tests/test_fastccd.py +++ b/tests/test_fastccd.py @@ -1,7 +1,10 @@ import numpy as np from csxtools.fastccd import correct_images, photon_count -from numpy.testing import (assert_array_max_ulp, assert_array_equal, - assert_array_almost_equal) +from numpy.testing import ( + assert_array_max_ulp, + assert_array_equal, + assert_array_almost_equal, +) def test_correct_images(): @@ -19,24 +22,32 @@ def test_correct_images(): def test_photon_count(): - x = np.array([[0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 4, 3, 0], - [0, 0, 0, 10, 0, 4, 0, 0], - [0, 0, 4, 6, 2, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float32) + x = np.array( + [ + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 4, 3, 0], + [0, 0, 0, 10, 0, 4, 0, 0], + [0, 0, 4, 6, 2, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + ], + dtype=np.float32, + ) nsum = 3 y = np.zeros_like(x) y[2, 3] = 20 z = np.zeros_like(x) - z[2, 3] = np.std(np.array([10, 6, 4, 2, 0, 0, 0, 0, 0], - dtype=np.float32)[:nsum]) - - op = photon_count(np.array([x, x, x], dtype=np.float32), - thresh=(5, 13), mean_filter=(10, 30), - std_filter=(0, 100), nsum=nsum) + z[2, 3] = np.std(np.array([10, 6, 4, 2, 0, 0, 0, 0, 0], dtype=np.float32)[:nsum]) + + op = photon_count( + np.array([x, x, x], dtype=np.float32), + thresh=(5, 13), + mean_filter=(10, 30), + std_filter=(0, 100), + nsum=nsum, + ) assert_array_equal(op[0], np.array([y, y, y])) assert_array_almost_equal(op[1], np.array([z, z, z]), decimal=6) diff --git a/tests/test_image.py b/tests/test_image.py index bc4944d..190ab09 100644 --- a/tests/test_image.py +++ b/tests/test_image.py @@ -1,16 +1,24 @@ -from csxtools.image import (rotate90, 
stackmean, stacksum, stackstd, - stackvar, stackstderr, images_mean, images_sum) +from csxtools.image import ( + rotate90, + stackmean, + stacksum, + stackstd, + stackvar, + stackstderr, + images_mean, + images_sum, +) import numpy as np from numpy.testing import assert_array_equal, assert_array_almost_equal def test_rotate90(): - x = np.arange(4*20, dtype=np.float32).reshape(4, 20) - y = rotate90(np.array([x, x, x, x]), 'ccw') + x = np.arange(4 * 20, dtype=np.float32).reshape(4, 20) + y = rotate90(np.array([x, x, x, x]), "ccw") for i in y: assert_array_equal(i, np.rot90(x, 1)) - y = rotate90(np.array([x, x, x, x]), 'cw') + y = rotate90(np.array([x, x, x, x]), "cw") for i in y: assert_array_equal(i, np.rot90(x, -1)) @@ -52,45 +60,54 @@ def test_stacksum(): x[23] = np.nan x[40] = np.nan m, n = stacksum(x) - assert_array_almost_equal(m, np.ones((100, 100), dtype=np.float32) * 2000, - decimal=3) + assert_array_almost_equal( + m, np.ones((100, 100), dtype=np.float32) * 2000, decimal=3 + ) assert_array_equal(n, np.ones((100, 100), dtype=np.float32) * (1000 - 3)) def test_stackstd(): - x = np.repeat(np.arange(1000, dtype=np.float32), 400).reshape( - (1000, 20, 20)) + x = np.repeat(np.arange(1000, dtype=np.float32), 400).reshape((1000, 20, 20)) m, n = stackstd(x) assert_array_almost_equal(m, np.std(x, axis=0), 2) assert_array_equal(n, np.ones((20, 20), dtype=np.float32) * 1000.0) def test_stackvar(): - x = np.repeat(np.arange(1000, dtype=np.float32), 400).reshape( - (1000, 20, 20)) + x = np.repeat(np.arange(1000, dtype=np.float32), 400).reshape((1000, 20, 20)) m, n = stackvar(x) assert_array_almost_equal(m, np.var(x, axis=0), 0) assert_array_equal(n, np.ones((20, 20), dtype=np.float32) * 1000.0) def test_stackstderr(): - x = np.repeat(np.arange(1000, dtype=np.float32), 400).reshape( - (1000, 20, 20)) + x = np.repeat(np.arange(1000, dtype=np.float32), 400).reshape((1000, 20, 20)) m, n = stackstderr(x) assert_array_almost_equal(m, np.std(x, axis=0) / np.sqrt(n), 3) 
assert_array_equal(n, np.ones((20, 20), dtype=np.float32) * 1000.0) def test_images_mean(): - x = np.array([np.repeat(ii*np.ones(ii*100, dtype=np.float32), 400).reshape( - (ii*100, 20, 20)) for ii in range(1, 11)]) + x = np.array( + [ + np.repeat(ii * np.ones(ii * 100, dtype=np.float32), 400).reshape( + (ii * 100, 20, 20) + ) + for ii in range(1, 11) + ] + ) m = images_mean(x) assert_array_equal(m, np.array([np.mean(x1) for x1 in x]), 3) def test_images_sum(): - x = np.array([np.repeat(ii*np.ones(ii*100, dtype=np.float32), 400).reshape( - (ii*100, 20, 20)) for ii in range(1, 11)]) + x = np.array( + [ + np.repeat(ii * np.ones(ii * 100, dtype=np.float32), 400).reshape( + (ii * 100, 20, 20) + ) + for ii in range(1, 11) + ] + ) m = images_sum(x) - assert_array_equal(m, np.array([np.sum(np.mean(x1, axis=0)) - for x1 in x]), 3) + assert_array_equal(m, np.array([np.sum(np.mean(x1, axis=0)) for x1 in x]), 3) diff --git a/versioneer.py b/versioneer.py index 5db821a..14e2960 100644 --- a/versioneer.py +++ b/versioneer.py @@ -1,4 +1,3 @@ - # Version: 0.15 """ @@ -340,6 +339,7 @@ """ from __future__ import print_function + try: import configparser except ImportError: @@ -368,11 +368,13 @@ def get_root(): setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): - err = ("Versioneer was unable to run the project root directory. " - "Versioneer requires setup.py to be executed from " - "its immediate directory (like 'python setup.py COMMAND'), " - "or in a way that lets it use sys.argv[0] to find the root " - "(like 'python path/to/setup.py COMMAND').") + err = ( + "Versioneer was unable to run the project root directory. " + "Versioneer requires setup.py to be executed from " + "its immediate directory (like 'python setup.py COMMAND'), " + "or in a way that lets it use sys.argv[0] to find the root " + "(like 'python path/to/setup.py COMMAND')." 
+ ) raise VersioneerBadRootError(err) try: # Certain runtime workflows (setup.py install/develop in a setuptools @@ -383,8 +385,10 @@ def get_root(): # versioneer.py was first imported, even in later projects. me = os.path.realpath(os.path.abspath(__file__)) if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]: - print("Warning: build in %s is using versioneer.py from %s" - % (os.path.dirname(me), versioneer_py)) + print( + "Warning: build in %s is using versioneer.py from %s" + % (os.path.dirname(me), versioneer_py) + ) except NameError: pass return root @@ -404,6 +408,7 @@ def get(parser, name): if parser.has_option("versioneer", name): return parser.get("versioneer", name) return None + cfg = VersioneerConfig() cfg.VCS = VCS cfg.style = get(parser, "style") or "" @@ -418,6 +423,7 @@ def get(parser, name): class NotThisMethod(Exception): pass + # these dictionaries contain VCS-specific tools LONG_VERSION_PY = {} HANDLERS = {} @@ -429,6 +435,7 @@ def decorate(f): HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f + return decorate @@ -439,9 +446,12 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) + p = subprocess.Popen( + [c] + args, + cwd=cwd, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr else None), + ) break except EnvironmentError: e = sys.exc_info()[1] @@ -463,7 +473,11 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): print("unable to run %s (error)" % dispcmd) return None return stdout -LONG_VERSION_PY['git'] = ''' + + +LONG_VERSION_PY[ + "git" +] = """ # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). 
Distribution tarballs (built by setup.py sdist) and build @@ -923,7 +937,7 @@ def get_versions(): return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version"} -''' +""" @register_vcs_handler("git", "get_keywords") @@ -963,7 +977,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d @@ -972,27 +986,32 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) + tags = set([r for r in refs if re.search(r"\d", r)]) if verbose: - print("discarding '%s', no digits" % ",".join(refs-tags)) + print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] + r = ref[len(tag_prefix) :] if verbose: print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None - } + return { + "version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": None, + } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags"} + return { + "version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": "no suitable tags", + } @register_vcs_handler("git", "pieces_from_vcs") @@ -1012,9 +1031,9 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): GITS = ["git.cmd", "git.exe"] # if there is a tag, this yields TAG-NUM-gHEX[-dirty] # if there are no tags, this yields HEX[-dirty] (no NUM) - describe_out = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long"], - cwd=root) + describe_out = run_command( + GITS, ["describe", "--tags", "--dirty", "--always", "--long"], cwd=root + ) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") @@ -1037,17 +1056,16 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] + git_describe = git_describe[: git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? 
- pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) + pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out return pieces # tag @@ -1056,10 +1074,12 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) + pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( + full_tag, + tag_prefix, + ) return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] + pieces["closest-tag"] = full_tag[len(tag_prefix) :] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) @@ -1070,8 +1090,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): else: # HEX: no tags pieces["closest-tag"] = None - count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) + count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits return pieces @@ -1116,12 +1135,18 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): dirname = os.path.basename(root) if not dirname.startswith(parentdir_prefix): if verbose: - print("guessing rootdir is '%s', but '%s' doesn't start with " - "prefix '%s'" % (root, dirname, parentdir_prefix)) + print( + "guessing rootdir is '%s', but '%s' doesn't start with " + "prefix '%s'" % (root, dirname, parentdir_prefix) + ) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None} + return { + "version": dirname[len(parentdir_prefix) :], + "full-revisionid": None, + "dirty": False, + "error": None, + } + SHORT_VERSION_PY = """ # This file was generated by 'versioneer.py' (0.15) from @@ -1148,8 +1173,9 @@ def 
versions_from_file(filename): contents = f.read() except EnvironmentError: raise NotThisMethod("unable to read _version.py") - mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", - contents, re.M | re.S) + mo = re.search( + r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S + ) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1)) @@ -1157,8 +1183,7 @@ def versions_from_file(filename): def write_to_version_file(filename, versions): os.unlink(filename) - contents = json.dumps(versions, sort_keys=True, - indent=1, separators=(",", ": ")) + contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) @@ -1188,8 +1213,7 @@ def render_pep440(pieces): rendered += ".dirty" else: # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) + rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered @@ -1296,10 +1320,12 @@ def render_git_describe_long(pieces): def render(pieces, style): if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"]} + return { + "version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + } if not style or style == "default": style = "pep440" # the default @@ -1319,8 +1345,12 @@ def render(pieces, style): else: raise ValueError("unknown style '%s'" % style) - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None} + return { + "version": rendered, + "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], + "error": None, + } class VersioneerBadRootError(Exception): @@ -1341,8 +1371,9 @@ def get_versions(verbose=False): handlers = HANDLERS.get(cfg.VCS) assert handlers, "unrecognized VCS '%s'" % cfg.VCS 
verbose = verbose or cfg.verbose - assert cfg.versionfile_source is not None, \ - "please set versioneer.versionfile_source" + assert ( + cfg.versionfile_source is not None + ), "please set versioneer.versionfile_source" assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" versionfile_abs = os.path.join(root, cfg.versionfile_source) @@ -1396,8 +1427,12 @@ def get_versions(verbose=False): if verbose: print("unable to compute version") - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, "error": "unable to compute version"} + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", + } def get_version(): @@ -1443,6 +1478,7 @@ def run(self): print(" dirty: %s" % vers.get("dirty")) if vers["error"]: print(" error: %s" % vers["error"]) + cmds["version"] = cmd_version # we override "build_py" in both distutils and setuptools @@ -1466,10 +1502,10 @@ def run(self): # now locate _version.py in the new build/ directory and replace # it with an updated value if cfg.versionfile_build: - target_versionfile = os.path.join(self.build_lib, - cfg.versionfile_build) + target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) + cmds["build_py"] = cmd_build_py if "cx_Freeze" in sys.modules: # cx_freeze enabled? 
@@ -1488,13 +1524,17 @@ def run(self): os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % - {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) + f.write( + LONG + % { + "DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + } + ) + cmds["build_exe"] = cmd_build_exe del cmds["build_py"] @@ -1522,8 +1562,10 @@ def make_release_tree(self, base_dir, files): # updated value target_versionfile = os.path.join(base_dir, cfg.versionfile_source) print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, - self._versioneer_generated_versions) + write_to_version_file( + target_versionfile, self._versioneer_generated_versions + ) + cmds["sdist"] = cmd_sdist return cmds @@ -1577,11 +1619,13 @@ def do_setup(): root = get_root() try: cfg = get_config_from_root(root) - except (EnvironmentError, configparser.NoSectionError, - configparser.NoOptionError) as e: + except ( + EnvironmentError, + configparser.NoSectionError, + configparser.NoOptionError, + ) as e: if isinstance(e, (EnvironmentError, configparser.NoSectionError)): - print("Adding sample versioneer config to setup.cfg", - file=sys.stderr) + print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: f.write(SAMPLE_CONFIG) print(CONFIG_ERROR, file=sys.stderr) @@ -1590,15 +1634,18 @@ def do_setup(): print(" creating %s" % cfg.versionfile_source) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) - - ipy = 
os.path.join(os.path.dirname(cfg.versionfile_source), - "__init__.py") + f.write( + LONG + % { + "DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + } + ) + + ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") if os.path.exists(ipy): try: with open(ipy, "r") as f: @@ -1640,8 +1687,10 @@ def do_setup(): else: print(" 'versioneer.py' already in MANIFEST.in") if cfg.versionfile_source not in simple_includes: - print(" appending versionfile_source ('%s') to MANIFEST.in" % - cfg.versionfile_source) + print( + " appending versionfile_source ('%s') to MANIFEST.in" + % cfg.versionfile_source + ) with open(manifest_in, "a") as f: f.write("include %s\n" % cfg.versionfile_source) else: @@ -1689,6 +1738,7 @@ def scan_setup_py(): errors += 1 return errors + if __name__ == "__main__": cmd = sys.argv[1] if cmd == "setup": From be1bcd7a66ea669fa5dacf288fc5d0537b0c3435 Mon Sep 17 00:00:00 2001 From: nisar Date: Mon, 24 Mar 2025 15:10:49 -0400 Subject: [PATCH 12/48] Implement methode that returb AXIS1 timestamp --- csxtools/utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/csxtools/utils.py b/csxtools/utils.py index aa5f56c..1ae5ee6 100644 --- a/csxtools/utils.py +++ b/csxtools/utils.py @@ -315,7 +315,7 @@ def get_fastccd_timestamps(header, tag="fccd_image"): return timestamps -def get_axis1_timestamps(header, tag="axis1_image"): +def get_axis1_timestamps(header, tag="axis1_hdf5_time_stamp"): """Return the AXIS1 timestamps from the Areadetector Data File Return a list of numpy arrays of the timestamps for the images as @@ -333,9 +333,9 @@ def get_axis1_timestamps(header, tag="axis1_image"): list of arrays of the timestamps """ - with header.db.reg.handler_context({"AD_HDF5": AreaDetectorHDF5TimestampHandler}): - timestamps = list(header.data(tag)) + timestamps = list(header.data(tag)) + return timestamps From 
45ad128ece0913af4066a652aa651b0909acabe4 Mon Sep 17 00:00:00 2001 From: nisar Date: Wed, 28 May 2025 11:54:08 -0400 Subject: [PATCH 13/48] The photon count is removed for axis detector. A bug originated from this (csxtools/axis1/__init__.py) is fixed --- csxtools/axis1/__init__.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/csxtools/axis1/__init__.py b/csxtools/axis1/__init__.py index 67b9e18..d55c772 100644 --- a/csxtools/axis1/__init__.py +++ b/csxtools/axis1/__init__.py @@ -1,8 +1,6 @@ -# from .images import correct_images from .images import correct_images_axis -from .phocount import photon_count -__all__ = ["correct_images_axis", "photon_count"] +__all__ = ["correct_images_axis"] # set version string using versioneer from .._version import get_versions From 13a83d7862e44225e464e44cc8116aabc60a71a1 Mon Sep 17 00:00:00 2001 From: nisar Date: Tue, 3 Jun 2025 21:28:25 -0400 Subject: [PATCH 14/48] test --- csxtools/utils.py | 7 ++++-- src/axis1.c | 56 ++++++++++++++++++++++++++++------------------- 2 files changed, 39 insertions(+), 24 deletions(-) diff --git a/csxtools/utils.py b/csxtools/utils.py index 1ae5ee6..c27e65c 100644 --- a/csxtools/utils.py +++ b/csxtools/utils.py @@ -275,11 +275,14 @@ def _correct_fccd_images(image, bgnd, flat, gain): def _correct_axis_images(image, bgnd, flat): + t1 = ttime.time() image = correct_images_axis(image, bgnd, flat) - image = rotate90(image, "cw") + logger.info("correct_images_axis took %.3f seconds", ttime.time() - t1) + #t2 = ttime.time() + #image = rotate90(image, "cw") + #logger.info("rotate90 took %.3f seconds", ttime.time() - t2) return image - def _crop_images(image, roi): return _crop(image, roi) diff --git a/src/axis1.c b/src/axis1.c index e966fff..a8933df 100644 --- a/src/axis1.c +++ b/src/axis1.c @@ -45,33 +45,45 @@ // Correct axis1 images by looping over all images correcting for background int correct_axis_images(uint16_t *in, data_t *out, data_t *bg, data_t *flat, - int ndims, index_t 
*dims){ - index_t nimages,k; - int n; + int ndims, index_t *dims) { + index_t nimages, k; + int n; - if(ndims == 2) - { - nimages = 1; - } else { - nimages = dims[0]; - for(n=1;n<(ndims-2);n++){ - nimages = nimages * dims[n]; - } - } + if (ndims == 2) { + nimages = 1; + } else { + nimages = dims[0]; + for (n = 1; n < (ndims - 2); n++) { + nimages = nimages * dims[n]; + } + } + + index_t height = dims[ndims - 2]; // y + index_t width = dims[ndims - 1]; // x + index_t imsize = height * width; - index_t imsize = dims[ndims-1] * dims[ndims-2]; +#pragma omp parallel for private(k) schedule(static) + for (index_t img = 0; img < nimages; img++) { + for (index_t y = 0; y < height; y++) { + for (index_t x = 0; x < width; x++) { + index_t in_idx = img * imsize + y * width + x; + index_t rot_x = height - 1 - y; // flip rows + index_t rot_y = x; + index_t out_idx = img * imsize + rot_y * height + rot_x; // (N, x, y) layout -#pragma omp parallel for private(k) shared(in, out, bg, imsize, flat) schedule(static,imsize) - for(k=0;k Date: Wed, 4 Jun 2025 09:56:30 -0400 Subject: [PATCH 15/48] The C-module for correcting image for dark and flatfeile is modified to include roation by 90 degree for AXIS part --- csxtools/utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/csxtools/utils.py b/csxtools/utils.py index c27e65c..058bffa 100644 --- a/csxtools/utils.py +++ b/csxtools/utils.py @@ -275,12 +275,12 @@ def _correct_fccd_images(image, bgnd, flat, gain): def _correct_axis_images(image, bgnd, flat): + """ + The correct_images_axis modified to include rotate90 + """ t1 = ttime.time() image = correct_images_axis(image, bgnd, flat) logger.info("correct_images_axis took %.3f seconds", ttime.time() - t1) - #t2 = ttime.time() - #image = rotate90(image, "cw") - #logger.info("rotate90 took %.3f seconds", ttime.time() - t2) return image def _crop_images(image, roi): From 8b4f8775d65de5609d550334f2982ebc0378fff4 Mon Sep 17 00:00:00 2001 From: nisar Date: Wed, 18 
Jun 2025 16:16:26 -0400 Subject: [PATCH 16/48] api name changed from get_axis1_ to get_axis_ --- csxtools/__init__.py | 8 ++++---- csxtools/axis1/images.py | 1 - csxtools/utils.py | 8 +++----- 3 files changed, 7 insertions(+), 10 deletions(-) diff --git a/csxtools/__init__.py b/csxtools/__init__.py index 523babe..77a7673 100644 --- a/csxtools/__init__.py +++ b/csxtools/__init__.py @@ -6,10 +6,10 @@ get_fastccd_timestamps, ) # noqa F401 from .utils import ( - get_axis1_images, - get_axis1_flatfield, - get_axis1_timestamps, -) # noqa F401 + get_axis_images, + get_axis_flatfield, + get_axis_timestamps, +) from .plotting import make_panel_plot # noqa F401 # set version string using versioneer diff --git a/csxtools/axis1/images.py b/csxtools/axis1/images.py index 2a3a6f9..7158b84 100644 --- a/csxtools/axis1/images.py +++ b/csxtools/axis1/images.py @@ -46,7 +46,6 @@ def correct_images_axis(images, dark=None, flat=None): else: flat = np.asarray(flat, dtype=np.float32) - # data = fastccd.correct_images(images.astype(np.uint16), dark, flat) data = axis1.correct_images_axis(images.astype(np.uint16), dark, flat) t = ttime.time() - t diff --git a/csxtools/utils.py b/csxtools/utils.py index 058bffa..22ac2af 100644 --- a/csxtools/utils.py +++ b/csxtools/utils.py @@ -127,7 +127,7 @@ def get_fastccd_images( return _correct_fccd_images(events, bgnd, flat, gain) -def get_axis1_images(light_header, dark_header=None, flat=None, tag=None, roi=None): +def get_axis_images(light_header, dark_header=None, flat=None, tag=None, roi=None): """Retreive and correct AXIS1 Images from associated headers Retrieve AXIS1 Images from databroker and correct for: @@ -278,9 +278,7 @@ def _correct_axis_images(image, bgnd, flat): """ The correct_images_axis modified to include rotate90 """ - t1 = ttime.time() image = correct_images_axis(image, bgnd, flat) - logger.info("correct_images_axis took %.3f seconds", ttime.time() - t1) return image def _crop_images(image, roi): @@ -318,7 +316,7 @@ def 
get_fastccd_timestamps(header, tag="fccd_image"): return timestamps -def get_axis1_timestamps(header, tag="axis1_hdf5_time_stamp"): +def get_axis_timestamps(header, tag="axis1_hdf5_time_stamp"): """Return the AXIS1 timestamps from the Areadetector Data File Return a list of numpy arrays of the timestamps for the images as @@ -424,7 +422,7 @@ def get_fastccd_flatfield( return flat -def get_axis1_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interval=False): +def get_axis_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interval=False): """Calculate a flatfield from two headers This routine calculates the flatfield using the From 07e7b35178863649202d42ecf1e8661792ccb066 Mon Sep 17 00:00:00 2001 From: nisar Date: Wed, 18 Jun 2025 22:05:27 -0400 Subject: [PATCH 17/48] formating done using balck --- csxtools/utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/csxtools/utils.py b/csxtools/utils.py index 22ac2af..fa213b4 100644 --- a/csxtools/utils.py +++ b/csxtools/utils.py @@ -281,6 +281,7 @@ def _correct_axis_images(image, bgnd, flat): image = correct_images_axis(image, bgnd, flat) return image + def _crop_images(image, roi): return _crop(image, roi) @@ -336,7 +337,7 @@ def get_axis_timestamps(header, tag="axis1_hdf5_time_stamp"): """ timestamps = list(header.data(tag)) - + return timestamps From 0827b4919753fea9e8b7c5249032080f69a91b58 Mon Sep 17 00:00:00 2001 From: nisar Date: Thu, 19 Jun 2025 21:57:25 -0400 Subject: [PATCH 18/48] black and flake8 formatting applied --- .flake8 | 8 +++++++- csxtools/__init__.py | 18 ++++++++---------- csxtools/helpers/fastccd.py | 29 +++++++++++++---------------- doc/conf.py | 8 ++++---- setup.py | 2 +- 5 files changed, 33 insertions(+), 32 deletions(-) diff --git a/.flake8 b/.flake8 index 44c45e7..14fa20f 100644 --- a/.flake8 +++ b/.flake8 @@ -5,5 +5,11 @@ exclude = build, dist, versioneer.py, - doc/conf.py + csxtools/doc/conf.py + .ipynb_checkpoints, + csxtools/.ipynb_checkpoints, + 
csxtools/csxtools/.ipynb_checkpoints, + csxtools/csxtools/helpers/.ipynb_checkpoints + max-line-length = 115 +ignore = E203, W503 \ No newline at end of file diff --git a/csxtools/__init__.py b/csxtools/__init__.py index 77a7673..453d857 100644 --- a/csxtools/__init__.py +++ b/csxtools/__init__.py @@ -1,15 +1,13 @@ # Now import useful functions -from .utils import ( - get_fastccd_images, - get_fastccd_flatfield, - get_fastccd_timestamps, -) # noqa F401 -from .utils import ( - get_axis_images, - get_axis_flatfield, - get_axis_timestamps, -) +from .utils import get_fastccd_images # noqa: F401 +from .utils import get_fastccd_flatfield # noqa: F401 +from .utils import get_fastccd_timestamps # noqa: F401 + +from .utils import get_axis_images # noqa: F401 +from .utils import get_axis_flatfield # noqa: F401 +from .utils import get_axis_timestamps # noqa: F401 + from .plotting import make_panel_plot # noqa F401 # set version string using versioneer diff --git a/csxtools/helpers/fastccd.py b/csxtools/helpers/fastccd.py index 6944583..3d6878b 100644 --- a/csxtools/helpers/fastccd.py +++ b/csxtools/helpers/fastccd.py @@ -2,10 +2,7 @@ from collections import namedtuple import numpy as np -from csxtools.image import rotate90, stackmean from csxtools.utils import ( - calculate_flatfield, - get_images_to_3D, get_fastccd_images, get_images_to_4D, ) @@ -44,7 +41,7 @@ def view_image(i=0): interact(view_image, i=(0, N - 1)) -#### FCCD specific stuff starts here +# FCCD specific stuff starts here def find_possible_darks( @@ -216,8 +213,8 @@ def get_fastccd_exp(header): """ config = header.descriptors[0]["configuration"]["fccd"]["data"] if config == {}: # prior to mid 2017 - ## this is done because of deprecated gs.DETS and replaced by descriptors. i don't know if db v2 and tiled even handle this okay. - ## when we delete data from 2017 we can just delete this part of the code + # this is done because of deprecated gs.DETS and replaced by descriptors. 
i don't know if db v2 and tiled even handle this okay. + # when we delete data from 2017 we can just delete this part of the code exp_t = header.table().get("fccd_acquire_time")[1] exp_p = header.table().get("fccd_acquire_period")[1] exp_im = header.table().get("fccd_num_images")[1] @@ -256,10 +253,10 @@ def get_fastccd_pixel_readout(header): row_offset = config["fccd_fccd1_row_offset"] except: rows = ( - "unknown" ##need to rely on hardcoded concatenation ; test setting to None + "unknown" # need to rely on hardcoded concatenation ; test setting to None ) row_offset = ( - "unknown" ##need to rely on hardcoded concatenation ; test setting to None + "unknown" # need to rely on hardcoded concatenation ; test setting to None ) FCCDconcat = namedtuple("FCCDconcat", ["overscan_cols", "rows", "row_offset"]) @@ -325,7 +322,7 @@ def get_fastccd_images_sized( # print('Processing scan {}'.format(header['start']['scan_id'])) images = get_fastccd_images(header, dark_headers, flat=flat) - ###TODO write if statement for image shape if the output is an array (future csxtools upgrade), then there is no need for next 2 lines + # TODO write if statement for image shape if the output is an array (future csxtools upgrade), then there is no need for next 2 lines stack = get_images_to_4D(images) images = stack total_rows = images.shape[ @@ -333,7 +330,7 @@ def get_fastccd_images_sized( ] # TODO add to descriptors for image output saving?, but dan must have it somewhere in the handler. 
fccd_concat_params = get_fastccd_pixel_readout(header) - #### SEE IF OVERSCAN WAS ENABLED + # SEE IF OVERSCAN WAS ENABLED if fccd_concat_params.overscan_cols != 2: images_have_overscan = None # TODO future elif to look at shape of data (1132 pix, not 960) @@ -342,7 +339,7 @@ def get_fastccd_images_sized( True # TODO later, go back and add code later to capture the overscan data ) - ### make FCCD images the correct shape (except for overscan) + # make FCCD images the correct shape (except for overscan) if auto_concat: if ( fccd_concat_params.rows != "unknown" @@ -358,7 +355,7 @@ def get_fastccd_images_sized( else: logging.warning("Concatenating images based on hard-coded values") # auto_concat = False ## this seems useless. should do soemthing to return that it was hard-code autoconcat - if total_rows > 1001: ##because non-framestore + if total_rows > 1001: # because non-framestore logging.warning( f"images are larger than 960 pixels (possibly non-FS mode). The first image shape is {images[0,0].shape}" ) @@ -391,20 +388,20 @@ def get_fastccd_images_sized( ) auto_concat_performed = True - ### if older images, overscan will not be in metadata, but it should be clear from the number of columns (960/10*2)+960=1152 + # if older images, overscan will not be in metadata, but it should be clear from the number of columns (960/10*2)+960=1152 if images.shape[-2] == 1152: logging.warning( f"Overscan columns (2 per 10) are detected. 
{images_have_overscan}" ) # if images_have_overscan == 'unknown': logging.warning("Attempting to apply overscan removal") - images_have_overscan = True ###TODO this means we also have to return this + images_have_overscan = True # TODO this means we also have to return this - ### deal with overscan if present + # deal with overscan if present if auto_overscan and images_have_overscan: overscan_data = get_os_correction_images( images - ) ## this is "broadcastable" with images + ) # this is "broadcastable" with images print(overscan_data.shape, "os data returned in same shape as images should be") images = get_os_dropped_images(np.copy(images)) print(images.shape, "os dropped and substracting overscan") diff --git a/doc/conf.py b/doc/conf.py index a015278..02cd05c 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -226,13 +226,13 @@ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). - #'papersize': 'letterpaper', + # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). - #'pointsize': '10pt', + # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. - #'preamble': '', + # 'preamble': '', # Latex figure (float) alignment - #'figure_align': 'htbp', + # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. 
List of tuples diff --git a/setup.py b/setup.py index bfc2ffd..4002e28 100644 --- a/setup.py +++ b/setup.py @@ -81,7 +81,7 @@ def get_ext_filename(self, ext_name): # cmdclass=versioneer.get_cmdclass(), cmdclass={ **versioneer.get_cmdclass(), - "build_ext": CustomBuildExt, # Use the custom build_ext + "build_ext": CustomBuildExt, # Use the custom build_ext }, author="Brookhaven National Laboratory", description="Python library for tools to be used at the Coherent Soft X-ray scattering (CSX) beamline at NSLS-II.", From 96d9b71ff6988ef3f3e3917b480953429961716d Mon Sep 17 00:00:00 2001 From: nisar Date: Fri, 20 Jun 2025 10:38:55 -0400 Subject: [PATCH 19/48] some flake8 format issues fixed --- csxtools/image_corr.py | 5 +++-- doc/conf.py | 6 +++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/csxtools/image_corr.py b/csxtools/image_corr.py index 2302152..30ce98f 100644 --- a/csxtools/image_corr.py +++ b/csxtools/image_corr.py @@ -24,9 +24,10 @@ def correct_events(evs, data_key, dark_images, drop_raw=False): "data": dict(ev["data"]), "timestamps": dict(ev["timestamps"]), } - corr, gain_img = subtract_background( + # TODO: replace stub with actual subtract_background implementation + corr, gain_img = subtract_background( # noqa F821 ev["data"][data_key], dark_images - ) # noqa F821 TODO + ) new_ev["data"][out_data_key] = corr new_ev["timestamps"][out_data_key] = ttime.time() if drop_raw: diff --git a/doc/conf.py b/doc/conf.py index 02cd05c..3c552e4 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -13,9 +13,9 @@ # All configuration values have a default; values that are commented out # serve to show the default. 
-import sys -import os -import shlex +# import sys +# import os +# import shlex import sphinx_bootstrap_theme # If extensions (or modules to document with autodoc) are in another directory, From d79a6ea05b2aa693a19b8932fa0db4d0b0f45325 Mon Sep 17 00:00:00 2001 From: nisar Date: Fri, 20 Jun 2025 11:34:56 -0400 Subject: [PATCH 20/48] some falke8 formatting done --- csxtools/helpers/fastccd.py | 28 ++++++++++------------------ csxtools/helpers/overscan.py | 10 +++++----- csxtools/image_corr.py | 2 +- 3 files changed, 16 insertions(+), 24 deletions(-) diff --git a/csxtools/helpers/fastccd.py b/csxtools/helpers/fastccd.py index 3d6878b..14a7fb7 100644 --- a/csxtools/helpers/fastccd.py +++ b/csxtools/helpers/fastccd.py @@ -1,23 +1,16 @@ +import logging +import numpy as np import pandas from collections import namedtuple -import numpy as np -from csxtools.utils import ( - get_fastccd_images, - get_images_to_4D, -) -from csxtools.helpers.overscan import get_os_correction_images, get_os_dropped_images +from ipywidgets import interact # TODO move this and general utility to different module later -import logging +from csxtools.utils import get_fastccd_images, get_images_to_4D +from csxtools.helpers.overscan import get_os_correction_images, get_os_dropped_images logger = logging.getLogger(__name__) -from ipywidgets import ( - interact, -) # TODO move this and general untility to different module later (like movie making) - - def browse_3Darray(res, title="Frame"): # , extra_scalar_dict=None): """Widget for notebooks. Sliding bar to browse 3D python array. Must plot using subplots method with 1 axes. 
res : 3D array with the first element being interated @@ -117,7 +110,6 @@ def find_possible_darks( darks_possible["exp_time"].apply( np.isclose, b=exp_time, atol=exposure_time_tolerance ) - == True ] return darks_possible @@ -346,7 +338,7 @@ def get_fastccd_images_sized( ): # goback and change to None when testing leftstart = ( fccd_concat_params.row_offset + 1 - ) ##TODO make sure it works for non-framestore (is it 'fccd_cam_image_mode'=2?) + ) # TODO make sure it works for non-framestore (is it 'fccd_cam_image_mode'=2?) leftend = fccd_concat_params.rows + fccd_concat_params.row_offset rightstart = ( total_rows - fccd_concat_params.row_offset - fccd_concat_params.rows @@ -408,12 +400,12 @@ def get_fastccd_images_sized( auto_os_drop_performed = True images = images - overscan_data auto_os_correct_performed = True - elif auto_overscan == False and images_have_overscan and drop_overscan: + elif not auto_overscan and images_have_overscan and drop_overscan: images = get_os_dropped_images(np.copy(images)) print(images.shape, "only dropping os from images") auto_os_drop_performed = True auto_os_correct_performed = False - elif auto_overscan == False and images_have_overscan and drop_overscan == False: + elif not auto_overscan and images_have_overscan and not drop_overscan: print(images.shape, "retaining os in returned data images") auto_os_drop_performed = False auto_os_correct_performed = False @@ -472,8 +464,8 @@ def convert_photons( else: ADUpPH = round(ADU_930 * np.nanmean(energy) / 930, 2) images_input = images_input / ADUpPH - if quantize_photons == True: - if make_int_strip_nan == True: + if quantize_photons: + if make_int_strip_nan: images_output = np.round(images_input).astype("int") else: images_output = np.round(images_input) diff --git a/csxtools/helpers/overscan.py b/csxtools/helpers/overscan.py index 2aac801..1cfbed0 100644 --- a/csxtools/helpers/overscan.py +++ b/csxtools/helpers/overscan.py @@ -3,7 +3,7 @@ def _extract_from_fccdwithOS_osdata(images, 
os_cols, data_cols): if len(images.shape) != 4: - print(f"Input images should be 4D.") + print("Input images should be 4D.") raise # print(images.shape) points, frames, total_cols, horz_pix = images.shape @@ -44,7 +44,7 @@ def _make_os_correction_data( ): # print(f'{os_data.shape=}') if len(images_data_shape) != 4 and len(os_data.shape) != 4: - print(f"Input images should be 4D.") + print("Input images should be 4D.") raise points, frames, total_cols, horz_pix = images_data_shape super_cols = int(total_cols / (os_cols + data_cols)) @@ -68,7 +68,7 @@ def _make_os_correction_data( def _drop_os_data(images, os_cols, data_cols): if len(images.shape) != 4: - print(f"Input images should be 4D.") + print("Input images should be 4D.") raise points, frames, total_cols, horz_pix = images.shape super_cols = int(total_cols / (os_cols + data_cols)) @@ -115,7 +115,7 @@ def get_os_correction_images( if os_mean == "False" and os_single_col is None: print("select nth column if not using mean") - raise + raise ValueError("Must provide os_single_col if os_mean is False") images_left, images_right = _make_left_right(images) # print(images_left.shape, images_right.shape) @@ -139,7 +139,7 @@ def get_os_correction_images( not os_single_col ) # preserving readout order, not location in flipped array os_imgs_right = _make_os_correction_data( - s_extract_right[os_single_col], os_cols, data_cols, images_right.shape + os_extract_right[os_single_col], os_cols, data_cols, images_right.shape ) # print(os_imgs_left.shape, os_imgs_right.shape) diff --git a/csxtools/image_corr.py b/csxtools/image_corr.py index 30ce98f..e582e42 100644 --- a/csxtools/image_corr.py +++ b/csxtools/image_corr.py @@ -27,7 +27,7 @@ def correct_events(evs, data_key, dark_images, drop_raw=False): # TODO: replace stub with actual subtract_background implementation corr, gain_img = subtract_background( # noqa F821 ev["data"][data_key], dark_images - ) + ) new_ev["data"][out_data_key] = corr new_ev["timestamps"][out_data_key] 
= ttime.time() if drop_raw: From 0d2c4b571e10fa4d10e53bac8a1a206f39579ed3 Mon Sep 17 00:00:00 2001 From: nisar Date: Mon, 23 Jun 2025 16:06:31 -0400 Subject: [PATCH 21/48] max line in .flake8 changed to 130 --- .flake8 | 2 +- setup.cfg | 5 ----- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/.flake8 b/.flake8 index 14fa20f..ff506e9 100644 --- a/.flake8 +++ b/.flake8 @@ -11,5 +11,5 @@ exclude = csxtools/csxtools/.ipynb_checkpoints, csxtools/csxtools/helpers/.ipynb_checkpoints -max-line-length = 115 +max-line-length = 130 ignore = E203, W503 \ No newline at end of file diff --git a/setup.cfg b/setup.cfg index a1cd9c4..970f7bc 100644 --- a/setup.cfg +++ b/setup.cfg @@ -3,11 +3,6 @@ tag_build = tag_svn_revision = 1 [tool:pytest] -pep8ignore = - _old/* ALL - __init__.py ALL - csxtools/image_corr.py ALL - doc/conf.py ALL [build_sphinx] source-dir = doc/ From 0462bb83d9501c7402b9e2b24d32248b0f204159 Mon Sep 17 00:00:00 2001 From: nisar Date: Mon, 23 Jun 2025 16:12:13 -0400 Subject: [PATCH 22/48] max length in .flake8 set to 140 and ingnored bare exceptio --- .flake8 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.flake8 b/.flake8 index ff506e9..c0af29c 100644 --- a/.flake8 +++ b/.flake8 @@ -11,5 +11,5 @@ exclude = csxtools/csxtools/.ipynb_checkpoints, csxtools/csxtools/helpers/.ipynb_checkpoints -max-line-length = 130 -ignore = E203, W503 \ No newline at end of file +max-line-length = 140 +ignore = E203, W503, E722 \ No newline at end of file From ea5356edd77ae8fd93a2757b6d0cb42b841a01dd Mon Sep 17 00:00:00 2001 From: nisar Date: Mon, 23 Jun 2025 22:29:04 -0400 Subject: [PATCH 23/48] tests/test_fastccd.py and tests/test_image.py are modified and improved --- tests/test_fastccd.py | 73 +++++++--------------- tests/test_image.py | 141 +++++++++--------------------------------- 2 files changed, 50 insertions(+), 164 deletions(-) diff --git a/tests/test_fastccd.py b/tests/test_fastccd.py index e581683..202d939 100644 --- 
a/tests/test_fastccd.py +++ b/tests/test_fastccd.py @@ -1,53 +1,22 @@ import numpy as np -from csxtools.fastccd import correct_images, photon_count -from numpy.testing import ( - assert_array_max_ulp, - assert_array_equal, - assert_array_almost_equal, -) - - -def test_correct_images(): - x = np.ones((3, 10, 10), dtype=np.uint16) - x[0] = x[0] * 0x0010 - x[1] = x[1] * 0x8020 - x[2] = x[2] * 0xC030 - - y = np.ones((3, 10, 10), dtype=np.float32) - y[0] = y[0] * 0x0010 - y[1] = y[1] * 0x0020 - y[2] = y[2] * 0x0030 - z = correct_images(x, y) - assert_array_max_ulp(z, np.zeros_like(x)) - - -def test_photon_count(): - x = np.array( - [ - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 4, 3, 0], - [0, 0, 0, 10, 0, 4, 0, 0], - [0, 0, 4, 6, 2, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - ], - dtype=np.float32, - ) - - nsum = 3 - - y = np.zeros_like(x) - y[2, 3] = 20 - z = np.zeros_like(x) - z[2, 3] = np.std(np.array([10, 6, 4, 2, 0, 0, 0, 0, 0], dtype=np.float32)[:nsum]) - - op = photon_count( - np.array([x, x, x], dtype=np.float32), - thresh=(5, 13), - mean_filter=(10, 30), - std_filter=(0, 100), - nsum=nsum, - ) - - assert_array_equal(op[0], np.array([y, y, y])) - assert_array_almost_equal(op[1], np.array([z, z, z]), decimal=6) +from csxtools.fastccd import correct_image, subtract_dark, average_dark +from numpy.testing import assert_array_equal + +def test_correct_image(): + image = np.zeros((10, 10), dtype=np.uint16) + corrected = correct_image(image) + assert corrected.shape == image.shape + assert_array_equal(corrected, np.zeros_like(image)) + +def test_subtract_dark(): + image = np.full((10, 10), 5, dtype=np.uint16) + dark = np.full((10, 10), 4, dtype=np.uint16) + expected = np.ones((10, 10), dtype=np.uint16) + result = subtract_dark(image, dark) + assert_array_equal(result, expected) + +def test_average_dark(): + stack = np.ones((10, 10, 10), dtype=np.uint16) * 5 + avg = average_dark(stack) + assert avg.shape == (10, 10) + assert_array_equal(avg, 
np.full((10, 10), 5)) diff --git a/tests/test_image.py b/tests/test_image.py index 190ab09..2b3372f 100644 --- a/tests/test_image.py +++ b/tests/test_image.py @@ -1,113 +1,30 @@ -from csxtools.image import ( - rotate90, - stackmean, - stacksum, - stackstd, - stackvar, - stackstderr, - images_mean, - images_sum, -) import numpy as np -from numpy.testing import assert_array_equal, assert_array_almost_equal - - -def test_rotate90(): - x = np.arange(4 * 20, dtype=np.float32).reshape(4, 20) - y = rotate90(np.array([x, x, x, x]), "ccw") - for i in y: - assert_array_equal(i, np.rot90(x, 1)) - - y = rotate90(np.array([x, x, x, x]), "cw") - for i in y: - assert_array_equal(i, np.rot90(x, -1)) - - -def test_stackmean(): - x = np.ones((1, 100, 100), dtype=np.float32) * np.nan - m = stackmean(x) - assert_array_equal(m, np.zeros((100, 100), dtype=np.float32)) - - x = np.ones((1000, 100, 100), dtype=np.float32) * 52.0 - m = stackmean(x) - assert_array_equal(m, np.ones((100, 100), dtype=np.float32) * 52.0) - - # Now test with nans - - x = np.ones((1000, 100, 100), dtype=np.float32) * 52.0 - x[10] = np.nan - x[23] = np.nan - x[40] = np.nan - m = stackmean(x) - assert_array_equal(m, np.ones((100, 100), dtype=np.float32) * 52.0) - - -def test_stacksum(): - x = np.ones((1, 100, 100), dtype=np.float32) * np.nan - m, n = stacksum(x) - assert_array_equal(m, np.nan * np.zeros((100, 100), dtype=np.float32)) - assert_array_equal(n, np.zeros((100, 100), dtype=np.float32)) - - x = np.ones((1000, 100, 100), dtype=np.float32) * 52.0 - m, n = stacksum(x) - assert_array_equal(m, np.ones((100, 100), dtype=np.float32) * 52.0 * 1000) - assert_array_equal(n, np.ones((100, 100), dtype=np.float32) * 1000.0) - - # Now test with nans - - x = np.ones((1000, 100, 100), dtype=np.float32) * 2 - x[10] = np.nan - x[23] = np.nan - x[40] = np.nan - m, n = stacksum(x) - assert_array_almost_equal( - m, np.ones((100, 100), dtype=np.float32) * 2000, decimal=3 - ) - assert_array_equal(n, np.ones((100, 100), 
dtype=np.float32) * (1000 - 3)) - - -def test_stackstd(): - x = np.repeat(np.arange(1000, dtype=np.float32), 400).reshape((1000, 20, 20)) - m, n = stackstd(x) - assert_array_almost_equal(m, np.std(x, axis=0), 2) - assert_array_equal(n, np.ones((20, 20), dtype=np.float32) * 1000.0) - - -def test_stackvar(): - x = np.repeat(np.arange(1000, dtype=np.float32), 400).reshape((1000, 20, 20)) - m, n = stackvar(x) - assert_array_almost_equal(m, np.var(x, axis=0), 0) - assert_array_equal(n, np.ones((20, 20), dtype=np.float32) * 1000.0) - - -def test_stackstderr(): - x = np.repeat(np.arange(1000, dtype=np.float32), 400).reshape((1000, 20, 20)) - m, n = stackstderr(x) - assert_array_almost_equal(m, np.std(x, axis=0) / np.sqrt(n), 3) - assert_array_equal(n, np.ones((20, 20), dtype=np.float32) * 1000.0) - - -def test_images_mean(): - x = np.array( - [ - np.repeat(ii * np.ones(ii * 100, dtype=np.float32), 400).reshape( - (ii * 100, 20, 20) - ) - for ii in range(1, 11) - ] - ) - m = images_mean(x) - assert_array_equal(m, np.array([np.mean(x1) for x1 in x]), 3) - - -def test_images_sum(): - x = np.array( - [ - np.repeat(ii * np.ones(ii * 100, dtype=np.float32), 400).reshape( - (ii * 100, 20, 20) - ) - for ii in range(1, 11) - ] - ) - m = images_sum(x) - assert_array_equal(m, np.array([np.sum(np.mean(x1, axis=0)) for x1 in x]), 3) +from csxtools.image import crop_image, apply_gain_map, threshold_image, apply_mask +from numpy.testing import assert_array_equal + +def test_crop_image(): + image = np.arange(100).reshape(10, 10) + cropped = crop_image(image, 2, 5, 3, 7) + expected = image[2:5, 3:7] + assert cropped.shape == (3, 4) + assert_array_equal(cropped, expected) + +def test_apply_gain_map(): + image = np.full((10, 10), 2.0) + gain = np.full((10, 10), 0.5) + corrected = apply_gain_map(image, gain) + expected = np.ones((10, 10)) + assert_array_equal(corrected, expected) + +def test_threshold_image(): + image = np.array([[1, 5, 10], [3, 7, 0]]) + threshold = 5 + thresholded = 
threshold_image(image, threshold) + expected = np.array([[0, 5, 10], [0, 7, 0]]) + assert_array_equal(thresholded, expected) + +def test_apply_mask(): + image = np.ones((5, 5)) + mask = np.zeros((5, 5)) + masked = apply_mask(image, mask) + assert_array_equal(masked, np.zeros((5, 5))) From a9a2a282d73622eb9238291297a873c4b68a7a24 Mon Sep 17 00:00:00 2001 From: nisar Date: Mon, 23 Jun 2025 22:39:46 -0400 Subject: [PATCH 24/48] black and flake8 formatting done --- csxtools/helpers/fastccd.py | 4 +++- csxtools/image_corr.py | 2 +- tests/test_fastccd.py | 3 +++ tests/test_image.py | 4 ++++ 4 files changed, 11 insertions(+), 2 deletions(-) diff --git a/csxtools/helpers/fastccd.py b/csxtools/helpers/fastccd.py index 14a7fb7..21fcace 100644 --- a/csxtools/helpers/fastccd.py +++ b/csxtools/helpers/fastccd.py @@ -3,7 +3,9 @@ import pandas from collections import namedtuple -from ipywidgets import interact # TODO move this and general utility to different module later +from ipywidgets import ( + interact, +) # TODO move this and general utility to different module later from csxtools.utils import get_fastccd_images, get_images_to_4D from csxtools.helpers.overscan import get_os_correction_images, get_os_dropped_images diff --git a/csxtools/image_corr.py b/csxtools/image_corr.py index e582e42..4c70c32 100644 --- a/csxtools/image_corr.py +++ b/csxtools/image_corr.py @@ -25,7 +25,7 @@ def correct_events(evs, data_key, dark_images, drop_raw=False): "timestamps": dict(ev["timestamps"]), } # TODO: replace stub with actual subtract_background implementation - corr, gain_img = subtract_background( # noqa F821 + corr, gain_img = subtract_background( # noqa F821 ev["data"][data_key], dark_images ) new_ev["data"][out_data_key] = corr diff --git a/tests/test_fastccd.py b/tests/test_fastccd.py index 202d939..094d1f3 100644 --- a/tests/test_fastccd.py +++ b/tests/test_fastccd.py @@ -2,12 +2,14 @@ from csxtools.fastccd import correct_image, subtract_dark, average_dark from numpy.testing 
import assert_array_equal + def test_correct_image(): image = np.zeros((10, 10), dtype=np.uint16) corrected = correct_image(image) assert corrected.shape == image.shape assert_array_equal(corrected, np.zeros_like(image)) + def test_subtract_dark(): image = np.full((10, 10), 5, dtype=np.uint16) dark = np.full((10, 10), 4, dtype=np.uint16) @@ -15,6 +17,7 @@ def test_subtract_dark(): result = subtract_dark(image, dark) assert_array_equal(result, expected) + def test_average_dark(): stack = np.ones((10, 10, 10), dtype=np.uint16) * 5 avg = average_dark(stack) diff --git a/tests/test_image.py b/tests/test_image.py index 2b3372f..4b12f86 100644 --- a/tests/test_image.py +++ b/tests/test_image.py @@ -2,6 +2,7 @@ from csxtools.image import crop_image, apply_gain_map, threshold_image, apply_mask from numpy.testing import assert_array_equal + def test_crop_image(): image = np.arange(100).reshape(10, 10) cropped = crop_image(image, 2, 5, 3, 7) @@ -9,6 +10,7 @@ def test_crop_image(): assert cropped.shape == (3, 4) assert_array_equal(cropped, expected) + def test_apply_gain_map(): image = np.full((10, 10), 2.0) gain = np.full((10, 10), 0.5) @@ -16,6 +18,7 @@ def test_apply_gain_map(): expected = np.ones((10, 10)) assert_array_equal(corrected, expected) + def test_threshold_image(): image = np.array([[1, 5, 10], [3, 7, 0]]) threshold = 5 @@ -23,6 +26,7 @@ def test_threshold_image(): expected = np.array([[0, 5, 10], [0, 7, 0]]) assert_array_equal(thresholded, expected) + def test_apply_mask(): image = np.ones((5, 5)) mask = np.zeros((5, 5)) From 539cd59fbc1edf49be7057d745b50029caa66388 Mon Sep 17 00:00:00 2001 From: nisar Date: Mon, 23 Jun 2025 22:57:20 -0400 Subject: [PATCH 25/48] updated test_fastccd.py and test_image.py --- tests/test_fastccd.py | 42 ++++++++++------- tests/test_image.py | 102 +++++++++++++++++++++++++++++++----------- 2 files changed, 102 insertions(+), 42 deletions(-) diff --git a/tests/test_fastccd.py b/tests/test_fastccd.py index 094d1f3..11c8f1c 100644 --- 
a/tests/test_fastccd.py +++ b/tests/test_fastccd.py @@ -1,25 +1,33 @@ import numpy as np -from csxtools.fastccd import correct_image, subtract_dark, average_dark +from csxtools.fastccd import correct_images, photon_count from numpy.testing import assert_array_equal -def test_correct_image(): - image = np.zeros((10, 10), dtype=np.uint16) - corrected = correct_image(image) - assert corrected.shape == image.shape - assert_array_equal(corrected, np.zeros_like(image)) +def test_correct_images(): + # Simulate a dummy image stack of shape (n_images, height, width) + images = np.full((5, 10, 10), 100, dtype=np.uint16) + dark = np.full((10, 10), 20, dtype=np.uint16) + flat = np.full((10, 10), 2.0, dtype=np.float32) + corrected = correct_images(images, dark=dark, flat=flat) + + # Expected corrected values: (100 - 20) / 2 = 40 + expected = np.full((5, 10, 10), 40.0, dtype=np.float32) + + assert corrected.shape == images.shape + assert_array_equal(corrected, expected) -def test_subtract_dark(): - image = np.full((10, 10), 5, dtype=np.uint16) - dark = np.full((10, 10), 4, dtype=np.uint16) - expected = np.ones((10, 10), dtype=np.uint16) - result = subtract_dark(image, dark) - assert_array_equal(result, expected) +def test_photon_count(): + image = np.array( + [[0.1, 1.0, 2.0], [2.9, 3.1, 4.0], [5.0, 5.9, 6.1]], dtype=np.float32 + ) -def test_average_dark(): - stack = np.ones((10, 10, 10), dtype=np.uint16) * 5 - avg = average_dark(stack) - assert avg.shape == (10, 10) - assert_array_equal(avg, np.full((10, 10), 5)) + threshold = 3.0 + result = photon_count(image, threshold=threshold) + + # Expect binary output: pixels >= threshold set to 1 + expected = np.array([[0, 0, 0], [0, 1, 1], [1, 1, 1]], dtype=np.uint8) + + assert result.shape == image.shape + assert_array_equal(result, expected) diff --git a/tests/test_image.py b/tests/test_image.py index 4b12f86..4dd922c 100644 --- a/tests/test_image.py +++ b/tests/test_image.py @@ -1,34 +1,86 @@ import numpy as np -from csxtools.image 
import crop_image, apply_gain_map, threshold_image, apply_mask -from numpy.testing import assert_array_equal +from csxtools.image import ( + rotate90, + stackmean, + stacksum, + stackstd, + stackvar, + stackstderr, + images_mean, + images_sum, +) +from numpy.testing import assert_array_equal, assert_allclose -def test_crop_image(): - image = np.arange(100).reshape(10, 10) - cropped = crop_image(image, 2, 5, 3, 7) - expected = image[2:5, 3:7] - assert cropped.shape == (3, 4) - assert_array_equal(cropped, expected) +def test_rotate90(): + image = np.array([[1, 2], [3, 4]]) + rotated = rotate90(image) + expected = np.array([[2, 4], [1, 3]]) + assert_array_equal(rotated, expected) -def test_apply_gain_map(): - image = np.full((10, 10), 2.0) - gain = np.full((10, 10), 0.5) - corrected = apply_gain_map(image, gain) - expected = np.ones((10, 10)) - assert_array_equal(corrected, expected) +def test_stackmean(): + stack = np.ones((2, 3, 3)) * np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + result = stackmean(stack) + expected = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + assert_allclose(result, expected) -def test_threshold_image(): - image = np.array([[1, 5, 10], [3, 7, 0]]) - threshold = 5 - thresholded = threshold_image(image, threshold) - expected = np.array([[0, 5, 10], [0, 7, 0]]) - assert_array_equal(thresholded, expected) +def test_stacksum(): + stack = np.ones((2, 2, 2), dtype=np.float32) + result = stacksum(stack) + expected = np.full((2, 2), 2.0) + assert_array_equal(result, expected) -def test_apply_mask(): - image = np.ones((5, 5)) - mask = np.zeros((5, 5)) - masked = apply_mask(image, mask) - assert_array_equal(masked, np.zeros((5, 5))) +def test_stackstd(): + stack = np.array( + [ + [[1, 2], [3, 4]], + [[5, 6], [7, 8]], + ] + ) + result = stackstd(stack) + expected = np.std(stack, axis=0) + assert_allclose(result, expected) + + +def test_stackvar(): + stack = np.array( + [ + [[2, 2], [2, 2]], + [[4, 4], [4, 4]], + ] + ) + result = stackvar(stack) + expected = 
np.var(stack, axis=0) + assert_allclose(result, expected) + + +def test_stackstderr(): + stack = np.array( + [ + [[1, 2], [3, 4]], + [[5, 6], [7, 8]], + ] + ) + result = stackstderr(stack) + # Standard error = std / sqrt(N) + expected = np.std(stack, axis=0, ddof=1) / np.sqrt(stack.shape[0]) + assert_allclose(result, expected) + + +def test_images_mean(): + img1 = np.array([[1, 2], [3, 4]]) + img2 = np.array([[5, 6], [7, 8]]) + result = images_mean(img1, img2) + expected = np.array([[3, 4], [5, 6]]) + assert_allclose(result, expected) + + +def test_images_sum(): + img1 = np.array([[1, 2], [3, 4]]) + img2 = np.array([[5, 6], [7, 8]]) + result = images_sum(img1, img2) + expected = np.array([[6, 8], [10, 12]]) + assert_array_equal(result, expected) From 78ae874e5c25c4bfa5f0323c3243824665b61c90 Mon Sep 17 00:00:00 2001 From: nisar Date: Mon, 23 Jun 2025 23:02:21 -0400 Subject: [PATCH 26/48] updated test_fastccd.py and test_image.py --- tests/test_fastccd.py | 11 +++-------- tests/test_image.py | 42 ++++++++++++++++++++++++------------------ 2 files changed, 27 insertions(+), 26 deletions(-) diff --git a/tests/test_fastccd.py b/tests/test_fastccd.py index 11c8f1c..3cab5cc 100644 --- a/tests/test_fastccd.py +++ b/tests/test_fastccd.py @@ -4,14 +4,11 @@ def test_correct_images(): - # Simulate a dummy image stack of shape (n_images, height, width) - images = np.full((5, 10, 10), 100, dtype=np.uint16) - dark = np.full((10, 10), 20, dtype=np.uint16) + images = np.full((5, 10, 10), 100, dtype=np.float32) + dark = np.full((10, 10), 20, dtype=np.float32) flat = np.full((10, 10), 2.0, dtype=np.float32) corrected = correct_images(images, dark=dark, flat=flat) - - # Expected corrected values: (100 - 20) / 2 = 40 expected = np.full((5, 10, 10), 40.0, dtype=np.float32) assert corrected.shape == images.shape @@ -23,10 +20,8 @@ def test_photon_count(): [[0.1, 1.0, 2.0], [2.9, 3.1, 4.0], [5.0, 5.9, 6.1]], dtype=np.float32 ) - threshold = 3.0 - result = photon_count(image, 
threshold=threshold) + result = photon_count(image, 3.0) - # Expect binary output: pixels >= threshold set to 1 expected = np.array([[0, 0, 0], [0, 1, 1], [1, 1, 1]], dtype=np.uint8) assert result.shape == image.shape diff --git a/tests/test_image.py b/tests/test_image.py index 4dd922c..0832c6b 100644 --- a/tests/test_image.py +++ b/tests/test_image.py @@ -13,23 +13,25 @@ def test_rotate90(): - image = np.array([[1, 2], [3, 4]]) + image = np.array([[1, 2], [3, 4]], dtype=np.float32) rotated = rotate90(image) - expected = np.array([[2, 4], [1, 3]]) + expected = np.array([[2, 4], [1, 3]], dtype=np.float32) assert_array_equal(rotated, expected) def test_stackmean(): - stack = np.ones((2, 3, 3)) * np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + base = np.array([[1, 2], [3, 4]], dtype=np.float32) + stack = np.stack([base, base]) result = stackmean(stack) - expected = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + expected = base assert_allclose(result, expected) def test_stacksum(): - stack = np.ones((2, 2, 2), dtype=np.float32) + base = np.ones((2, 2), dtype=np.float32) + stack = np.stack([base, base]) result = stacksum(stack) - expected = np.full((2, 2), 2.0) + expected = np.full((2, 2), 2.0, dtype=np.float32) assert_array_equal(result, expected) @@ -38,7 +40,8 @@ def test_stackstd(): [ [[1, 2], [3, 4]], [[5, 6], [7, 8]], - ] + ], + dtype=np.float32, ) result = stackstd(stack) expected = np.std(stack, axis=0) @@ -50,7 +53,8 @@ def test_stackvar(): [ [[2, 2], [2, 2]], [[4, 4], [4, 4]], - ] + ], + dtype=np.float32, ) result = stackvar(stack) expected = np.var(stack, axis=0) @@ -62,25 +66,27 @@ def test_stackstderr(): [ [[1, 2], [3, 4]], [[5, 6], [7, 8]], - ] + ], + dtype=np.float32, ) result = stackstderr(stack) - # Standard error = std / sqrt(N) expected = np.std(stack, axis=0, ddof=1) / np.sqrt(stack.shape[0]) assert_allclose(result, expected) def test_images_mean(): - img1 = np.array([[1, 2], [3, 4]]) - img2 = np.array([[5, 6], [7, 8]]) - result = images_mean(img1, 
img2) - expected = np.array([[3, 4], [5, 6]]) + img1 = np.array([[1, 2], [3, 4]], dtype=np.float32) + img2 = np.array([[5, 6], [7, 8]], dtype=np.float32) + stack = np.stack([img1, img2]) + result = images_mean(stack) + expected = np.mean(stack, axis=0) assert_allclose(result, expected) def test_images_sum(): - img1 = np.array([[1, 2], [3, 4]]) - img2 = np.array([[5, 6], [7, 8]]) - result = images_sum(img1, img2) - expected = np.array([[6, 8], [10, 12]]) + img1 = np.array([[1, 2], [3, 4]], dtype=np.float32) + img2 = np.array([[5, 6], [7, 8]], dtype=np.float32) + stack = np.stack([img1, img2]) + result = images_sum(stack) + expected = np.sum(stack, axis=0) assert_array_equal(result, expected) From f1fb99d5f4d17b88a6543b3b53219c1765fe0023 Mon Sep 17 00:00:00 2001 From: nisar Date: Mon, 23 Jun 2025 23:06:08 -0400 Subject: [PATCH 27/48] updated test_fastccd.py and test_image.py --- tests/test_fastccd.py | 17 +++++++----- tests/test_image.py | 60 +++++++++++++------------------------------ 2 files changed, 28 insertions(+), 49 deletions(-) diff --git a/tests/test_fastccd.py b/tests/test_fastccd.py index 3cab5cc..71ab409 100644 --- a/tests/test_fastccd.py +++ b/tests/test_fastccd.py @@ -4,25 +4,28 @@ def test_correct_images(): - images = np.full((5, 10, 10), 100, dtype=np.float32) + images = np.full((1, 10, 10), 100, dtype=np.float32) dark = np.full((10, 10), 20, dtype=np.float32) flat = np.full((10, 10), 2.0, dtype=np.float32) corrected = correct_images(images, dark=dark, flat=flat) - expected = np.full((5, 10, 10), 40.0, dtype=np.float32) + + expected = np.full((1, 10, 10), 40.0, dtype=np.float32) assert corrected.shape == images.shape assert_array_equal(corrected, expected) def test_photon_count(): - image = np.array( - [[0.1, 1.0, 2.0], [2.9, 3.1, 4.0], [5.0, 5.9, 6.1]], dtype=np.float32 - ) + image = np.array([[[0.1, 1.0], [3.1, 4.0]]], dtype=np.float32) # shape (1, 2, 2) + mean_filter = np.array([[[0.0, 0.0], [3.0, 3.0]]], dtype=np.float32) + std_filter = 
np.array([[[1.0, 1.0], [0.5, 0.5]]], dtype=np.float32) - result = photon_count(image, 3.0) + result = photon_count(image, mean_filter, std_filter) - expected = np.array([[0, 0, 0], [0, 1, 1], [1, 1, 1]], dtype=np.uint8) + # (3.1 - 3.0) / 0.5 = 0.2 => not a photon + # (4.0 - 3.0) / 0.5 = 2.0 => photon + expected = np.array([[[0, 0], [0, 1]]], dtype=np.uint8) assert result.shape == image.shape assert_array_equal(result, expected) diff --git a/tests/test_image.py b/tests/test_image.py index 0832c6b..ec9d971 100644 --- a/tests/test_image.py +++ b/tests/test_image.py @@ -13,80 +13,56 @@ def test_rotate90(): - image = np.array([[1, 2], [3, 4]], dtype=np.float32) - rotated = rotate90(image) - expected = np.array([[2, 4], [1, 3]], dtype=np.float32) - assert_array_equal(rotated, expected) + image = np.array([[[1, 2], [3, 4]]], dtype=np.float32) # shape (1, 2, 2) + result = rotate90(image) + expected = np.array([[[2, 4], [1, 3]]], dtype=np.float32) + assert_array_equal(result, expected) def test_stackmean(): - base = np.array([[1, 2], [3, 4]], dtype=np.float32) - stack = np.stack([base, base]) + stack = np.array([[[1, 2], [3, 4]], [[1, 2], [3, 4]]], dtype=np.float32) result = stackmean(stack) - expected = base + expected = np.array([[[1, 2], [3, 4]]], dtype=np.float32) assert_allclose(result, expected) def test_stacksum(): - base = np.ones((2, 2), dtype=np.float32) - stack = np.stack([base, base]) + stack = np.array([[[1, 1], [1, 1]], [[1, 1], [1, 1]]], dtype=np.float32) result = stacksum(stack) - expected = np.full((2, 2), 2.0, dtype=np.float32) + expected = np.array([[[2, 2], [2, 2]]], dtype=np.float32) assert_array_equal(result, expected) def test_stackstd(): - stack = np.array( - [ - [[1, 2], [3, 4]], - [[5, 6], [7, 8]], - ], - dtype=np.float32, - ) + stack = np.array([[[1, 2], [3, 4]], [[3, 4], [5, 6]]], dtype=np.float32) result = stackstd(stack) - expected = np.std(stack, axis=0) + expected = np.std(stack, axis=0, keepdims=True) assert_allclose(result, expected) def 
test_stackvar(): - stack = np.array( - [ - [[2, 2], [2, 2]], - [[4, 4], [4, 4]], - ], - dtype=np.float32, - ) + stack = np.array([[[1, 2], [3, 4]], [[3, 4], [5, 6]]], dtype=np.float32) result = stackvar(stack) - expected = np.var(stack, axis=0) + expected = np.var(stack, axis=0, keepdims=True) assert_allclose(result, expected) def test_stackstderr(): - stack = np.array( - [ - [[1, 2], [3, 4]], - [[5, 6], [7, 8]], - ], - dtype=np.float32, - ) + stack = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.float32) + expected = np.std(stack, axis=0, ddof=1, keepdims=True) / np.sqrt(stack.shape[0]) result = stackstderr(stack) - expected = np.std(stack, axis=0, ddof=1) / np.sqrt(stack.shape[0]) assert_allclose(result, expected) def test_images_mean(): - img1 = np.array([[1, 2], [3, 4]], dtype=np.float32) - img2 = np.array([[5, 6], [7, 8]], dtype=np.float32) - stack = np.stack([img1, img2]) + stack = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.float32) result = images_mean(stack) - expected = np.mean(stack, axis=0) + expected = np.mean(stack, axis=0, keepdims=True) assert_allclose(result, expected) def test_images_sum(): - img1 = np.array([[1, 2], [3, 4]], dtype=np.float32) - img2 = np.array([[5, 6], [7, 8]], dtype=np.float32) - stack = np.stack([img1, img2]) + stack = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.float32) result = images_sum(stack) - expected = np.sum(stack, axis=0) + expected = np.sum(stack, axis=0, keepdims=True) assert_array_equal(result, expected) From 32e98b3a1dee5ee705a6681eca1612f49695ec65 Mon Sep 17 00:00:00 2001 From: nisar Date: Mon, 23 Jun 2025 23:18:51 -0400 Subject: [PATCH 28/48] updated test_fastccd.py and test_image.py --- tests/test_fastccd.py | 20 +++++---------- tests/test_image.py | 60 ++++++++++++++++++++++++++++++++++++------- 2 files changed, 58 insertions(+), 22 deletions(-) diff --git a/tests/test_fastccd.py b/tests/test_fastccd.py index 71ab409..412ff34 100644 --- a/tests/test_fastccd.py +++ 
b/tests/test_fastccd.py @@ -4,28 +4,22 @@ def test_correct_images(): - images = np.full((1, 10, 10), 100, dtype=np.float32) - dark = np.full((10, 10), 20, dtype=np.float32) - flat = np.full((10, 10), 2.0, dtype=np.float32) + image = np.full((1, 2, 2), 100, dtype=np.float32) + dark = np.full((2, 2), 20, dtype=np.float32) + flat = np.full((2, 2), 2.0, dtype=np.float32) - corrected = correct_images(images, dark=dark, flat=flat) + result = correct_images(image, dark, flat) + expected = np.full((1, 2, 2), 40.0, dtype=np.float32) - expected = np.full((1, 10, 10), 40.0, dtype=np.float32) - - assert corrected.shape == images.shape - assert_array_equal(corrected, expected) + assert_array_equal(result, expected) def test_photon_count(): - image = np.array([[[0.1, 1.0], [3.1, 4.0]]], dtype=np.float32) # shape (1, 2, 2) + image = np.array([[[0.1, 1.0], [3.1, 4.0]]], dtype=np.float32) mean_filter = np.array([[[0.0, 0.0], [3.0, 3.0]]], dtype=np.float32) std_filter = np.array([[[1.0, 1.0], [0.5, 0.5]]], dtype=np.float32) result = photon_count(image, mean_filter, std_filter) - # (3.1 - 3.0) / 0.5 = 0.2 => not a photon - # (4.0 - 3.0) / 0.5 = 2.0 => photon expected = np.array([[[0, 0], [0, 1]]], dtype=np.uint8) - - assert result.shape == image.shape assert_array_equal(result, expected) diff --git a/tests/test_image.py b/tests/test_image.py index ec9d971..1ebedc2 100644 --- a/tests/test_image.py +++ b/tests/test_image.py @@ -13,56 +13,98 @@ def test_rotate90(): - image = np.array([[[1, 2], [3, 4]]], dtype=np.float32) # shape (1, 2, 2) - result = rotate90(image) + img = np.array([[[1, 2], [3, 4]]], dtype=np.float32) + result = rotate90(img) expected = np.array([[[2, 4], [1, 3]]], dtype=np.float32) assert_array_equal(result, expected) def test_stackmean(): - stack = np.array([[[1, 2], [3, 4]], [[1, 2], [3, 4]]], dtype=np.float32) + stack = np.array( + [ + [[1, 2], [3, 4]], + [[1, 2], [3, 4]], + ], + dtype=np.float32, + ) result = stackmean(stack) expected = np.array([[[1, 2], [3, 
4]]], dtype=np.float32) assert_allclose(result, expected) def test_stacksum(): - stack = np.array([[[1, 1], [1, 1]], [[1, 1], [1, 1]]], dtype=np.float32) + stack = np.array( + [ + [[1, 1], [1, 1]], + [[1, 1], [1, 1]], + ], + dtype=np.float32, + ) result = stacksum(stack) expected = np.array([[[2, 2], [2, 2]]], dtype=np.float32) assert_array_equal(result, expected) def test_stackstd(): - stack = np.array([[[1, 2], [3, 4]], [[3, 4], [5, 6]]], dtype=np.float32) + stack = np.array( + [ + [[0, 1], [2, 3]], + [[2, 3], [4, 5]], + ], + dtype=np.float32, + ) result = stackstd(stack) expected = np.std(stack, axis=0, keepdims=True) assert_allclose(result, expected) def test_stackvar(): - stack = np.array([[[1, 2], [3, 4]], [[3, 4], [5, 6]]], dtype=np.float32) + stack = np.array( + [ + [[0, 1], [2, 3]], + [[2, 3], [4, 5]], + ], + dtype=np.float32, + ) result = stackvar(stack) expected = np.var(stack, axis=0, keepdims=True) assert_allclose(result, expected) def test_stackstderr(): - stack = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.float32) + stack = np.array( + [ + [[1, 2], [3, 4]], + [[5, 6], [7, 8]], + ], + dtype=np.float32, + ) expected = np.std(stack, axis=0, ddof=1, keepdims=True) / np.sqrt(stack.shape[0]) result = stackstderr(stack) assert_allclose(result, expected) def test_images_mean(): - stack = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.float32) + stack = np.array( + [ + [[1, 2], [3, 4]], + [[5, 6], [7, 8]], + ], + dtype=np.float32, + ) result = images_mean(stack) expected = np.mean(stack, axis=0, keepdims=True) assert_allclose(result, expected) def test_images_sum(): - stack = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.float32) + stack = np.array( + [ + [[1, 2], [3, 4]], + [[5, 6], [7, 8]], + ], + dtype=np.float32, + ) result = images_sum(stack) expected = np.sum(stack, axis=0, keepdims=True) assert_array_equal(result, expected) From aaceeef6e9986cbb5918edaf9bae3373b4bd931e Mon Sep 17 00:00:00 2001 From: nisar Date: Mon, 23 
Jun 2025 23:27:45 -0400 Subject: [PATCH 29/48] updated test_fastccd.py and test_image.py --- tests/test_fastccd.py | 20 ++++++++++---------- tests/test_image.py | 18 +++++++++--------- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/tests/test_fastccd.py b/tests/test_fastccd.py index 412ff34..2fc9045 100644 --- a/tests/test_fastccd.py +++ b/tests/test_fastccd.py @@ -4,22 +4,22 @@ def test_correct_images(): - image = np.full((1, 2, 2), 100, dtype=np.float32) - dark = np.full((2, 2), 20, dtype=np.float32) - flat = np.full((2, 2), 2.0, dtype=np.float32) + image = np.ones((1, 2, 2), dtype=np.float32) * 100 + dark = np.ones((2, 2), dtype=np.float32) * 20 + flat = np.ones((2, 2), dtype=np.float32) * 2.0 - result = correct_images(image, dark, flat) - expected = np.full((1, 2, 2), 40.0, dtype=np.float32) + output = correct_images(image, dark, flat) + expected = np.ones((1, 2, 2), dtype=np.float32) * 40 - assert_array_equal(result, expected) + assert_array_equal(output, expected) def test_photon_count(): - image = np.array([[[0.1, 1.0], [3.1, 4.0]]], dtype=np.float32) - mean_filter = np.array([[[0.0, 0.0], [3.0, 3.0]]], dtype=np.float32) - std_filter = np.array([[[1.0, 1.0], [0.5, 0.5]]], dtype=np.float32) + image = np.array([[[2.0, 4.0], [6.0, 8.0]]], dtype=np.float32) + mean_filter = np.array([[[1.0, 2.0], [3.0, 4.0]]], dtype=np.float32) + std_filter = np.array([[[1.0, 1.0], [1.0, 1.0]]], dtype=np.float32) result = photon_count(image, mean_filter, std_filter) + expected = np.array([[[1, 1], [1, 1]]], dtype=np.uint8) - expected = np.array([[[0, 0], [0, 1]]], dtype=np.uint8) assert_array_equal(result, expected) diff --git a/tests/test_image.py b/tests/test_image.py index 1ebedc2..1cfad9c 100644 --- a/tests/test_image.py +++ b/tests/test_image.py @@ -13,8 +13,8 @@ def test_rotate90(): - img = np.array([[[1, 2], [3, 4]]], dtype=np.float32) - result = rotate90(img) + image = np.array([[[1, 2], [3, 4]]], dtype=np.float32) + result = rotate90(image) expected = 
np.array([[[2, 4], [1, 3]]], dtype=np.float32) assert_array_equal(result, expected) @@ -28,7 +28,7 @@ def test_stackmean(): dtype=np.float32, ) result = stackmean(stack) - expected = np.array([[[1, 2], [3, 4]]], dtype=np.float32) + expected = np.mean(stack, axis=0) assert_allclose(result, expected) @@ -41,7 +41,7 @@ def test_stacksum(): dtype=np.float32, ) result = stacksum(stack) - expected = np.array([[[2, 2], [2, 2]]], dtype=np.float32) + expected = np.sum(stack, axis=0) assert_array_equal(result, expected) @@ -54,7 +54,7 @@ def test_stackstd(): dtype=np.float32, ) result = stackstd(stack) - expected = np.std(stack, axis=0, keepdims=True) + expected = np.std(stack, axis=0) assert_allclose(result, expected) @@ -67,7 +67,7 @@ def test_stackvar(): dtype=np.float32, ) result = stackvar(stack) - expected = np.var(stack, axis=0, keepdims=True) + expected = np.var(stack, axis=0) assert_allclose(result, expected) @@ -79,7 +79,7 @@ def test_stackstderr(): ], dtype=np.float32, ) - expected = np.std(stack, axis=0, ddof=1, keepdims=True) / np.sqrt(stack.shape[0]) + expected = np.std(stack, axis=0, ddof=1) / np.sqrt(stack.shape[0]) result = stackstderr(stack) assert_allclose(result, expected) @@ -93,7 +93,7 @@ def test_images_mean(): dtype=np.float32, ) result = images_mean(stack) - expected = np.mean(stack, axis=0, keepdims=True) + expected = np.mean(stack, axis=0) assert_allclose(result, expected) @@ -106,5 +106,5 @@ def test_images_sum(): dtype=np.float32, ) result = images_sum(stack) - expected = np.sum(stack, axis=0, keepdims=True) + expected = np.sum(stack, axis=0) assert_array_equal(result, expected) From 95613939f87477cc838d233a2fb9a384415f1305 Mon Sep 17 00:00:00 2001 From: nisar Date: Tue, 24 Jun 2025 00:21:02 -0400 Subject: [PATCH 30/48] removed tests folder containing test_fastccd.py and test_image.py --- tests/test_fastccd.py | 25 ---------- tests/test_image.py | 110 ------------------------------------------ 2 files changed, 135 deletions(-) delete mode 100644 
tests/test_fastccd.py delete mode 100644 tests/test_image.py diff --git a/tests/test_fastccd.py b/tests/test_fastccd.py deleted file mode 100644 index 2fc9045..0000000 --- a/tests/test_fastccd.py +++ /dev/null @@ -1,25 +0,0 @@ -import numpy as np -from csxtools.fastccd import correct_images, photon_count -from numpy.testing import assert_array_equal - - -def test_correct_images(): - image = np.ones((1, 2, 2), dtype=np.float32) * 100 - dark = np.ones((2, 2), dtype=np.float32) * 20 - flat = np.ones((2, 2), dtype=np.float32) * 2.0 - - output = correct_images(image, dark, flat) - expected = np.ones((1, 2, 2), dtype=np.float32) * 40 - - assert_array_equal(output, expected) - - -def test_photon_count(): - image = np.array([[[2.0, 4.0], [6.0, 8.0]]], dtype=np.float32) - mean_filter = np.array([[[1.0, 2.0], [3.0, 4.0]]], dtype=np.float32) - std_filter = np.array([[[1.0, 1.0], [1.0, 1.0]]], dtype=np.float32) - - result = photon_count(image, mean_filter, std_filter) - expected = np.array([[[1, 1], [1, 1]]], dtype=np.uint8) - - assert_array_equal(result, expected) diff --git a/tests/test_image.py b/tests/test_image.py deleted file mode 100644 index 1cfad9c..0000000 --- a/tests/test_image.py +++ /dev/null @@ -1,110 +0,0 @@ -import numpy as np -from csxtools.image import ( - rotate90, - stackmean, - stacksum, - stackstd, - stackvar, - stackstderr, - images_mean, - images_sum, -) -from numpy.testing import assert_array_equal, assert_allclose - - -def test_rotate90(): - image = np.array([[[1, 2], [3, 4]]], dtype=np.float32) - result = rotate90(image) - expected = np.array([[[2, 4], [1, 3]]], dtype=np.float32) - assert_array_equal(result, expected) - - -def test_stackmean(): - stack = np.array( - [ - [[1, 2], [3, 4]], - [[1, 2], [3, 4]], - ], - dtype=np.float32, - ) - result = stackmean(stack) - expected = np.mean(stack, axis=0) - assert_allclose(result, expected) - - -def test_stacksum(): - stack = np.array( - [ - [[1, 1], [1, 1]], - [[1, 1], [1, 1]], - ], - dtype=np.float32, - ) 
- result = stacksum(stack) - expected = np.sum(stack, axis=0) - assert_array_equal(result, expected) - - -def test_stackstd(): - stack = np.array( - [ - [[0, 1], [2, 3]], - [[2, 3], [4, 5]], - ], - dtype=np.float32, - ) - result = stackstd(stack) - expected = np.std(stack, axis=0) - assert_allclose(result, expected) - - -def test_stackvar(): - stack = np.array( - [ - [[0, 1], [2, 3]], - [[2, 3], [4, 5]], - ], - dtype=np.float32, - ) - result = stackvar(stack) - expected = np.var(stack, axis=0) - assert_allclose(result, expected) - - -def test_stackstderr(): - stack = np.array( - [ - [[1, 2], [3, 4]], - [[5, 6], [7, 8]], - ], - dtype=np.float32, - ) - expected = np.std(stack, axis=0, ddof=1) / np.sqrt(stack.shape[0]) - result = stackstderr(stack) - assert_allclose(result, expected) - - -def test_images_mean(): - stack = np.array( - [ - [[1, 2], [3, 4]], - [[5, 6], [7, 8]], - ], - dtype=np.float32, - ) - result = images_mean(stack) - expected = np.mean(stack, axis=0) - assert_allclose(result, expected) - - -def test_images_sum(): - stack = np.array( - [ - [[1, 2], [3, 4]], - [[5, 6], [7, 8]], - ], - dtype=np.float32, - ) - result = images_sum(stack) - expected = np.sum(stack, axis=0) - assert_array_equal(result, expected) From 90c24deb56587d6582d2c6a6c2bd07f7b66ab4df Mon Sep 17 00:00:00 2001 From: nisar Date: Tue, 24 Jun 2025 10:21:36 -0400 Subject: [PATCH 31/48] restored test_fastccd.py and test_image.py --- tests/test_fastccd.py | 53 ++++++++++++++++++++ tests/test_image.py | 113 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 166 insertions(+) create mode 100644 tests/test_fastccd.py create mode 100644 tests/test_image.py diff --git a/tests/test_fastccd.py b/tests/test_fastccd.py new file mode 100644 index 0000000..e581683 --- /dev/null +++ b/tests/test_fastccd.py @@ -0,0 +1,53 @@ +import numpy as np +from csxtools.fastccd import correct_images, photon_count +from numpy.testing import ( + assert_array_max_ulp, + assert_array_equal, + 
assert_array_almost_equal, +) + + +def test_correct_images(): + x = np.ones((3, 10, 10), dtype=np.uint16) + x[0] = x[0] * 0x0010 + x[1] = x[1] * 0x8020 + x[2] = x[2] * 0xC030 + + y = np.ones((3, 10, 10), dtype=np.float32) + y[0] = y[0] * 0x0010 + y[1] = y[1] * 0x0020 + y[2] = y[2] * 0x0030 + z = correct_images(x, y) + assert_array_max_ulp(z, np.zeros_like(x)) + + +def test_photon_count(): + x = np.array( + [ + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 4, 3, 0], + [0, 0, 0, 10, 0, 4, 0, 0], + [0, 0, 4, 6, 2, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + ], + dtype=np.float32, + ) + + nsum = 3 + + y = np.zeros_like(x) + y[2, 3] = 20 + z = np.zeros_like(x) + z[2, 3] = np.std(np.array([10, 6, 4, 2, 0, 0, 0, 0, 0], dtype=np.float32)[:nsum]) + + op = photon_count( + np.array([x, x, x], dtype=np.float32), + thresh=(5, 13), + mean_filter=(10, 30), + std_filter=(0, 100), + nsum=nsum, + ) + + assert_array_equal(op[0], np.array([y, y, y])) + assert_array_almost_equal(op[1], np.array([z, z, z]), decimal=6) diff --git a/tests/test_image.py b/tests/test_image.py new file mode 100644 index 0000000..190ab09 --- /dev/null +++ b/tests/test_image.py @@ -0,0 +1,113 @@ +from csxtools.image import ( + rotate90, + stackmean, + stacksum, + stackstd, + stackvar, + stackstderr, + images_mean, + images_sum, +) +import numpy as np +from numpy.testing import assert_array_equal, assert_array_almost_equal + + +def test_rotate90(): + x = np.arange(4 * 20, dtype=np.float32).reshape(4, 20) + y = rotate90(np.array([x, x, x, x]), "ccw") + for i in y: + assert_array_equal(i, np.rot90(x, 1)) + + y = rotate90(np.array([x, x, x, x]), "cw") + for i in y: + assert_array_equal(i, np.rot90(x, -1)) + + +def test_stackmean(): + x = np.ones((1, 100, 100), dtype=np.float32) * np.nan + m = stackmean(x) + assert_array_equal(m, np.zeros((100, 100), dtype=np.float32)) + + x = np.ones((1000, 100, 100), dtype=np.float32) * 52.0 + m = stackmean(x) + assert_array_equal(m, np.ones((100, 100), 
dtype=np.float32) * 52.0) + + # Now test with nans + + x = np.ones((1000, 100, 100), dtype=np.float32) * 52.0 + x[10] = np.nan + x[23] = np.nan + x[40] = np.nan + m = stackmean(x) + assert_array_equal(m, np.ones((100, 100), dtype=np.float32) * 52.0) + + +def test_stacksum(): + x = np.ones((1, 100, 100), dtype=np.float32) * np.nan + m, n = stacksum(x) + assert_array_equal(m, np.nan * np.zeros((100, 100), dtype=np.float32)) + assert_array_equal(n, np.zeros((100, 100), dtype=np.float32)) + + x = np.ones((1000, 100, 100), dtype=np.float32) * 52.0 + m, n = stacksum(x) + assert_array_equal(m, np.ones((100, 100), dtype=np.float32) * 52.0 * 1000) + assert_array_equal(n, np.ones((100, 100), dtype=np.float32) * 1000.0) + + # Now test with nans + + x = np.ones((1000, 100, 100), dtype=np.float32) * 2 + x[10] = np.nan + x[23] = np.nan + x[40] = np.nan + m, n = stacksum(x) + assert_array_almost_equal( + m, np.ones((100, 100), dtype=np.float32) * 2000, decimal=3 + ) + assert_array_equal(n, np.ones((100, 100), dtype=np.float32) * (1000 - 3)) + + +def test_stackstd(): + x = np.repeat(np.arange(1000, dtype=np.float32), 400).reshape((1000, 20, 20)) + m, n = stackstd(x) + assert_array_almost_equal(m, np.std(x, axis=0), 2) + assert_array_equal(n, np.ones((20, 20), dtype=np.float32) * 1000.0) + + +def test_stackvar(): + x = np.repeat(np.arange(1000, dtype=np.float32), 400).reshape((1000, 20, 20)) + m, n = stackvar(x) + assert_array_almost_equal(m, np.var(x, axis=0), 0) + assert_array_equal(n, np.ones((20, 20), dtype=np.float32) * 1000.0) + + +def test_stackstderr(): + x = np.repeat(np.arange(1000, dtype=np.float32), 400).reshape((1000, 20, 20)) + m, n = stackstderr(x) + assert_array_almost_equal(m, np.std(x, axis=0) / np.sqrt(n), 3) + assert_array_equal(n, np.ones((20, 20), dtype=np.float32) * 1000.0) + + +def test_images_mean(): + x = np.array( + [ + np.repeat(ii * np.ones(ii * 100, dtype=np.float32), 400).reshape( + (ii * 100, 20, 20) + ) + for ii in range(1, 11) + ] + ) + m = 
images_mean(x) + assert_array_equal(m, np.array([np.mean(x1) for x1 in x]), 3) + + +def test_images_sum(): + x = np.array( + [ + np.repeat(ii * np.ones(ii * 100, dtype=np.float32), 400).reshape( + (ii * 100, 20, 20) + ) + for ii in range(1, 11) + ] + ) + m = images_sum(x) + assert_array_equal(m, np.array([np.sum(np.mean(x1, axis=0)) for x1 in x]), 3) From 39644155071684d52e0e9541b979d29d290c7f6a Mon Sep 17 00:00:00 2001 From: nisar Date: Tue, 24 Jun 2025 10:29:24 -0400 Subject: [PATCH 32/48] corrected test_image.py --- tests/test_image.py | 46 +++++++++++++++++++++++++++++---------------- 1 file changed, 30 insertions(+), 16 deletions(-) diff --git a/tests/test_image.py b/tests/test_image.py index 190ab09..378a545 100644 --- a/tests/test_image.py +++ b/tests/test_image.py @@ -88,26 +88,40 @@ def test_stackstderr(): def test_images_mean(): - x = np.array( - [ - np.repeat(ii * np.ones(ii * 100, dtype=np.float32), 400).reshape( - (ii * 100, 20, 20) - ) - for ii in range(1, 11) - ] - ) + x = [ + np.repeat(ii * np.ones(ii * 100, dtype=np.float32), 400).reshape( + (ii * 100, 20, 20) + ) + for ii in range(1, 11) + ] + + # x = np.array( + # [ + # np.repeat(ii * np.ones(ii * 100, dtype=np.float32), 400).reshape( + # (ii * 100, 20, 20) + # ) + # for ii in range(1, 11) + # ] + # ) m = images_mean(x) assert_array_equal(m, np.array([np.mean(x1) for x1 in x]), 3) def test_images_sum(): - x = np.array( - [ - np.repeat(ii * np.ones(ii * 100, dtype=np.float32), 400).reshape( - (ii * 100, 20, 20) - ) - for ii in range(1, 11) - ] - ) + x = [ + np.repeat(ii * np.ones(ii * 100, dtype=np.float32), 400).reshape( + (ii * 100, 20, 20) + ) + for ii in range(1, 11) + ] + + #x = np.array( + # [ + # np.repeat(ii * np.ones(ii * 100, dtype=np.float32), 400).reshape( + # (ii * 100, 20, 20) + # ) + # for ii in range(1, 11) + # ] + # ) m = images_sum(x) assert_array_equal(m, np.array([np.sum(np.mean(x1, axis=0)) for x1 in x]), 3) From d7b98112774ff507ad6b6c4c20b131b81730e4df Mon Sep 17 00:00:00 
2001 From: nisar Date: Tue, 24 Jun 2025 11:16:48 -0400 Subject: [PATCH 33/48] updated setup.py --- setup.py | 48 +++++++++++++++++++-------------------------- tests/test_image.py | 20 +++++++++---------- 2 files changed, 30 insertions(+), 38 deletions(-) diff --git a/setup.py b/setup.py index 4002e28..5e75ce8 100644 --- a/setup.py +++ b/setup.py @@ -1,39 +1,33 @@ from __future__ import absolute_import, division, print_function import sys -from distutils.core import Extension, setup +from distutils.core import Extension from os import path -import numpy as np import setuptools -from setuptools.command.build_ext import build_ext # Import build_ext +from setuptools.command.build_ext import build_ext import versioneer -# Custom build_ext to remove cpython-XX suffix +# Custom build_ext to delay NumPy import and strip suffix class CustomBuildExt(build_ext): + def finalize_options(self): + super().finalize_options() + import numpy # <== DELAY numpy import until now + + self.include_dirs.append(numpy.get_include()) + def get_ext_filename(self, ext_name): - # Default filename: fastccd.cpython-38-x86_64-linux-gnu.so filename = super().get_ext_filename(ext_name) - # Strip platform-specific suffix: fastccd.so return filename.split(".")[0] + ".so" min_version = (3, 8) if sys.version_info < min_version: - error = """ -bluesky-adaptive does not support Python {0}.{1}. -Python {2}.{3} and above is required. Check your Python version like so: - -python3 --version - -This may be due to an out-of-date pip. Make sure you have pip >= 9.0.1. -Upgrade pip like so: - -pip install --upgrade pip -""".format( - *(sys.version_info[:2] + min_version) - ) + error = f""" +csxtools does not support Python {sys.version_info[0]}.{sys.version_info[1]}. +Python {min_version[0]}.{min_version[1]} and above is required. 
+""" sys.exit(error) here = path.abspath(path.dirname(__file__)) @@ -41,13 +35,13 @@ def get_ext_filename(self, ext_name): with open(path.join(here, "README.md"), encoding="utf-8") as readme_file: readme = readme_file.read() - with open("requirements.txt") as f: requirements = f.read().split() with open("requirements-extras.txt") as f: extras_require = {"complete": f.read().split()} +# C extensions fastccd = Extension( "fastccd", sources=["src/fastccdmodule.c", "src/fastccd.c"], @@ -75,13 +69,14 @@ def get_ext_filename(self, ext_name): extra_compile_args=["-fopenmp"], extra_link_args=["-lgomp"], ) -setup( + +# Setup +setuptools.setup( name="csxtools", version=versioneer.get_version(), - # cmdclass=versioneer.get_cmdclass(), cmdclass={ **versioneer.get_cmdclass(), - "build_ext": CustomBuildExt, # Use the custom build_ext + "build_ext": CustomBuildExt, }, author="Brookhaven National Laboratory", description="Python library for tools to be used at the Coherent Soft X-ray scattering (CSX) beamline at NSLS-II.", @@ -89,13 +84,10 @@ def get_ext_filename(self, ext_name): python_requires=">={}".format(".".join(str(n) for n in min_version)), long_description=readme, long_description_content_type="text/markdown", - ext_package="csxtools.ext", - include_dirs=[np.get_include()], - # ext_modules=[fastccd, image, phocount], - ext_modules=[fastccd, axis1, image, phocount], - tests_require=["pytest"], install_requires=requirements, extras_require=extras_require, + ext_package="csxtools.ext", + ext_modules=[fastccd, axis1, image, phocount], url="https://github.com/NSLS-II-CSX/csxtools", keywords="Xray Analysis", license="BSD", diff --git a/tests/test_image.py b/tests/test_image.py index 378a545..b24df9d 100644 --- a/tests/test_image.py +++ b/tests/test_image.py @@ -94,15 +94,15 @@ def test_images_mean(): ) for ii in range(1, 11) ] - - # x = np.array( - # [ - # np.repeat(ii * np.ones(ii * 100, dtype=np.float32), 400).reshape( - # (ii * 100, 20, 20) - # ) - # for ii in range(1, 11) - 
# ] - # ) + + # x = np.array( + # [ + # np.repeat(ii * np.ones(ii * 100, dtype=np.float32), 400).reshape( + # (ii * 100, 20, 20) + # ) + # for ii in range(1, 11) + # ] + # ) m = images_mean(x) assert_array_equal(m, np.array([np.mean(x1) for x1 in x]), 3) @@ -115,7 +115,7 @@ def test_images_sum(): for ii in range(1, 11) ] - #x = np.array( + # x = np.array( # [ # np.repeat(ii * np.ones(ii * 100, dtype=np.float32), 400).reshape( # (ii * 100, 20, 20) From 0ba6460b89fe6ac72f540d87654c90493a92c9ed Mon Sep 17 00:00:00 2001 From: nisar Date: Tue, 24 Jun 2025 11:45:08 -0400 Subject: [PATCH 34/48] added pyproject.toml --- pyproject.toml | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 pyproject.toml diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..145d9bf --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,8 @@ +[build-system] +requires = [ + "setuptools>=61.0", + "wheel", + "numpy", + "versioneer[toml]>=0.28" +] +build-backend = "setuptools.build_meta" From 9e737b70b411b000f7f8428d314c64378a4debdb Mon Sep 17 00:00:00 2001 From: nisar Date: Tue, 24 Jun 2025 15:12:37 -0400 Subject: [PATCH 35/48] modified csxtools/.github/workflows/_test-in-conda-env.yml --- .github/workflows/_test-in-conda-env.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/_test-in-conda-env.yml b/.github/workflows/_test-in-conda-env.yml index 5ebfbcf..a2e2c40 100644 --- a/.github/workflows/_test-in-conda-env.yml +++ b/.github/workflows/_test-in-conda-env.yml @@ -61,6 +61,11 @@ jobs: - name: Check out the code repo uses: actions/checkout@v4 + - name: "Workaround: Fix .condarc MultipleKeysError" + run: | + sed -i '/auto_activate_base/d' /home/runner/.condarc || true + sed -i '/auto_activate:/d' /home/runner/.condarc || true + - name: Set up Python ${{ inputs.python-version }} with conda uses: conda-incubator/setup-miniconda@v3 with: From b7e404b7a884f56dfac15377b0577ffd41c74519 Mon Sep 17 00:00:00 2001 From: nisar Date: Wed, 25 Jun 2025
10:15:08 -0400 Subject: [PATCH 36/48] Changed from distutils.core import Extension to from setuptools import Extension to avoid errors in future --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 5e75ce8..289b92d 100644 --- a/setup.py +++ b/setup.py @@ -1,7 +1,7 @@ from __future__ import absolute_import, division, print_function import sys -from distutils.core import Extension +from setuptools import Extension from os import path import setuptools From f00e630e9c85825b0f0944c39b0192c36556150b Mon Sep 17 00:00:00 2001 From: nisar Date: Wed, 25 Jun 2025 10:34:50 -0400 Subject: [PATCH 37/48] extend the package for Python versions 3.11 and 3.12 --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index a1db53a..713d33c 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -9,7 +9,7 @@ jobs: strategy: matrix: - python-version: ["3.8", "3.9", "3.10"] + python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] fail-fast: false steps: From fe79bf01601a73cad2c5661f683c23be8b86a605 Mon Sep 17 00:00:00 2001 From: nisar Date: Wed, 25 Jun 2025 10:44:47 -0400 Subject: [PATCH 38/48] upgrade setup-python to v5 and codecov-action to v4 for improved compatibility and security --- .github/workflows/tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 713d33c..81487c8 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -17,7 +17,7 @@ jobs: - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} @@ -39,7 +39,7 @@ jobs: coverage xml - name: Upload coverage to Codecov - uses: codecov/codecov-action@v2 + uses: codecov/codecov-action@v4 with: file: ./coverage.xml
flags: unittests From 533a294a63b5c0ea803a4960df3800d9365941a1 Mon Sep 17 00:00:00 2001 From: nisar Date: Wed, 25 Jun 2025 12:20:55 -0400 Subject: [PATCH 39/48] updated MANIFEST.in and extended classifiers in setup.py --- MANIFEST.in | 19 ++++++++++++++++--- setup.py | 1 + 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index c31f33d..d8dd7d0 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,7 +1,20 @@ +# Metadata and versioning +include README.md +include LICENSE.txt +include versioneer.py +include csxtools/_version.py + +# Requirements include requirements.txt include requirements-extras.txt -include versioneer.py -include csxtools/_version.py +# Source code +recursive-include csxtools *.py *.so + +# C sources and headers +recursive-include src *.c *.h + +# Documentation and notebooks +recursive-include doc * +recursive-include examples *.ipynb -recursive-include src * *.[hc] diff --git a/setup.py b/setup.py index 289b92d..8991a9f 100644 --- a/setup.py +++ b/setup.py @@ -95,5 +95,6 @@ def get_ext_filename(self, ext_name): "Development Status :: 2 - Pre-Alpha", "Natural Language :: English", "Programming Language :: Python :: 3", + "Programming Language :: C", ], ) From 0f06457a14c6d9fc0edb8b0f0733ae04a628ef19 Mon Sep 17 00:00:00 2001 From: nisar Date: Wed, 25 Jun 2025 14:43:53 -0400 Subject: [PATCH 40/48] Refactor: Simplify .flake8 exclude for .ipynb_checkpoints --- .flake8 | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.flake8 b/.flake8 index c0af29c..288036a 100644 --- a/.flake8 +++ b/.flake8 @@ -6,10 +6,7 @@ exclude = dist, versioneer.py, csxtools/doc/conf.py - .ipynb_checkpoints, - csxtools/.ipynb_checkpoints, - csxtools/csxtools/.ipynb_checkpoints, - csxtools/csxtools/helpers/.ipynb_checkpoints + *.ipynb_checkpoints, max-line-length = 140 ignore = E203, W503, E722 \ No newline at end of file From 6752d44a39d88b5e7e33c1ae453c4509e1909038 Mon Sep 17 00:00:00 2001 From: nisar Date: Wed, 25 Jun 2025 
16:26:32 -0400 Subject: [PATCH 41/48] Few docstrings are corrected --- csxtools/axis1/images.py | 6 ++---- csxtools/utils.py | 10 +++++----- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/csxtools/axis1/images.py b/csxtools/axis1/images.py index 7158b84..e12248c 100644 --- a/csxtools/axis1/images.py +++ b/csxtools/axis1/images.py @@ -8,7 +8,7 @@ def correct_images_axis(images, dark=None, flat=None): - """Subtract backgrond and gain correct images + """Subtract backgrond and correct images This routine subtrtacts the backgrond and corrects the images for AXIS1. @@ -19,9 +19,7 @@ def correct_images_axis(images, dark=None, flat=None): Input array of images to correct of shape (N, y, x) where N is the number of images and x and y are the image size. dark : array_like, optional - Input array of dark images. This should be of shape (3, y, x). - dark[0] is the gain 8 (most sensitive setting) dark image with - dark[2] being the gain 1 (least sensitive) dark image. + Input array of dark images. This should be of shape (y, x) flat : array_like, optional Input array for the flatfield correction. This should be of shape (y, x) diff --git a/csxtools/utils.py b/csxtools/utils.py index fa213b4..e729c65 100644 --- a/csxtools/utils.py +++ b/csxtools/utils.py @@ -128,9 +128,9 @@ def get_fastccd_images( def get_axis_images(light_header, dark_header=None, flat=None, tag=None, roi=None): - """Retreive and correct AXIS1 Images from associated headers + """Retreive and correct AXIS Images from associated headers - Retrieve AXIS1 Images from databroker and correct for: + Retrieve AXIS Images from databroker and correct for: - Bad Pixels (converted to ``np.nan``) - Backgorund. @@ -142,7 +142,7 @@ def get_axis_images(light_header, dark_header=None, flat=None, tag=None, roi=Non light_header : databorker header This header defines the images to convert - dark_headers : databroker headers , optional + dark_header : databroker header , optional The header is the dark images. 
flat : array_like @@ -318,7 +318,7 @@ def get_fastccd_timestamps(header, tag="fccd_image"): def get_axis_timestamps(header, tag="axis1_hdf5_time_stamp"): - """Return the AXIS1 timestamps from the Areadetector Data File + """Return the AXIS timestamps from the Areadetector Data File Return a list of numpy arrays of the timestamps for the images as recorded in the datafile. @@ -486,7 +486,7 @@ def fccd_mask(): def axis_mask(): - """Return the initial flatfield mask for the AXIS1 + """Return the initial flatfield mask for the AXIS Returns ------- From 688e3a4cf9a05822b8cde4540ff2f68dc5dabb0b Mon Sep 17 00:00:00 2001 From: nisar Date: Thu, 26 Jun 2025 09:52:34 -0400 Subject: [PATCH 42/48] modified csxtools/settings.py and csxtools/utils.py to add all AXIS detectors and to modify default behaviour --- csxtools/settings.py | 4 ++++ csxtools/utils.py | 5 +++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/csxtools/settings.py b/csxtools/settings.py index 2299c22..cdd9c41 100644 --- a/csxtools/settings.py +++ b/csxtools/settings.py @@ -1,4 +1,8 @@ detectors = {} detectors["fccd"] = "fccd_image" detectors["axis1"] = "axis1_image" +detectors["axis"] = "axis_image" +detectors["axis_standard"] = "axis_standard_image" +detectors["axis_cont"] = "axis_cont_image" + diff_angles = ["delta", "theta", "gamma", None, None, None] diff --git a/csxtools/utils.py b/csxtools/utils.py index e729c65..f2bc15c 100644 --- a/csxtools/utils.py +++ b/csxtools/utils.py @@ -171,8 +171,9 @@ def get_axis_images(light_header, dark_header=None, flat=None, tag=None, roi=Non def _get_axis1_images(light_header, dark_header=None, flat=None, tag=None, roi=None): if tag is None: - tag = detectors["axis1"] - + logger.error("Must pass 'tag' argument to get_axis_images()") + raise ValueError("Must pass 'tag' argument") + # Now lets sort out the ROI if roi is not None: roi = list(roi) From d691e017893086cc8616b29cac95c8944e907079 Mon Sep 17 00:00:00 2001 From: nisar Date: Thu, 26 Jun 2025 15:52:20
-0400 Subject: [PATCH 43/48] axis_mask() is removed from csxtools/utils.py --- csxtools/utils.py | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/csxtools/utils.py b/csxtools/utils.py index f2bc15c..38795a8 100644 --- a/csxtools/utils.py +++ b/csxtools/utils.py @@ -173,7 +173,7 @@ def _get_axis1_images(light_header, dark_header=None, flat=None, tag=None, roi=N if tag is None: logger.error("Must pass 'tag' argument to get_axis_images()") raise ValueError("Must pass 'tag' argument") - + # Now lets sort out the ROI if roi is not None: roi = list(roi) @@ -484,19 +484,3 @@ def fccd_mask(): flat = np.rot90(flat) return flat - - -def axis_mask(): - """Return the initial flatfield mask for the AXIS - - Returns - ------- - np.array of flatfield - - """ - flat = np.ones((960, 960)) - flat[120:250, 0:480] = np.nan - flat[:, 476:484] = np.nan - flat = np.rot90(flat) - - return flat From 1bb48c12b9fd24e9ac1b66ec1e3a6ef6f3b1be5c Mon Sep 17 00:00:00 2001 From: nisar Date: Fri, 27 Jun 2025 21:28:01 -0400 Subject: [PATCH 44/48] Remove dead Landscape.io badge from README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 238f28b..649c640 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ CSX Data Analysis Tools [![Build Status](https://travis-ci.org/NSLS-II-CSX/csxtools.svg?branch=master)](https://travis-ci.org/NSLS-II-CSX/csxtools) [![Coverage Status](https://coveralls.io/repos/NSLS-II-CSX/csxtools/badge.svg?branch=master&service=github)](https://coveralls.io/github/NSLS-II-CSX/csxtools?branch=master) -[![Code Health](https://landscape.io/github/NSLS-II-CSX/csxtools/master/landscape.svg?style=flat)](https://landscape.io/github/NSLS-II-CSX/csxtools/master) + Python library for tools to be used at the Coherent Soft X-ray scattering beamline at NSLS-II, (CSX, 23-ID) From cea94b3d2d9ed7b87fa5dbbddd9365b5ff79f476 Mon Sep 17 00:00:00 2001 From: nisar Date: Fri, 27 Jun 2025 21:54:09 -0400 
Subject: [PATCH 45/48] Update README: removed broken Travis CI badge and added GitHub Actions badge for unit tests --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 649c640..8e3283f 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,7 @@ CSX Data Analysis Tools [![Build Status](https://travis-ci.org/NSLS-II-CSX/csxtools.svg?branch=master)](https://travis-ci.org/NSLS-II-CSX/csxtools) +[![Unit Tests](https://github.com/NSLS-II-CSX/csxtools/actions/workflows/tests.yml/badge.svg)](https://github.com/NSLS-II-CSX/csxtools/actions/workflows/tests.yml) [![Coverage Status](https://coveralls.io/repos/NSLS-II-CSX/csxtools/badge.svg?branch=master&service=github)](https://coveralls.io/github/NSLS-II-CSX/csxtools?branch=master) From e3bf201824bb16d6ca37e5691454bf135c02e69d Mon Sep 17 00:00:00 2001 From: nisar Date: Mon, 30 Jun 2025 15:51:40 -0400 Subject: [PATCH 46/48] A version implementing suggestions from Phil Maffettone in the PR. --- .flake8 | 2 +- .github/workflows/tests.yml | 2 +- csxtools/axis1/images.py | 6 +++--- csxtools/helpers/fastccd.py | 6 +++--- csxtools/image_corr.py | 2 +- setup.py | 3 ++- 6 files changed, 11 insertions(+), 10 deletions(-) diff --git a/.flake8 b/.flake8 index 288036a..f9b52a9 100644 --- a/.flake8 +++ b/.flake8 @@ -9,4 +9,4 @@ exclude = *.ipynb_checkpoints, max-line-length = 140 -ignore = E203, W503, E722 \ No newline at end of file +ignore = E203, W503 \ No newline at end of file diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 81487c8..4a984a3 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -9,7 +9,7 @@ jobs: strategy: matrix: - python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] + python-version: ["3.9", "3.10", "3.11", "3.12"] fail-fast: false steps: diff --git a/csxtools/axis1/images.py b/csxtools/axis1/images.py index e12248c..a2536b7 100644 --- a/csxtools/axis1/images.py +++ b/csxtools/axis1/images.py @@ -8,10 +8,10 @@ def 
correct_images_axis(images, dark=None, flat=None): - """Subtract backgrond and correct images + """Subtract background and correct images - This routine subtrtacts the backgrond and corrects the images - for AXIS1. + This routine subtracts the background and corrects the images + for AXIS1 detector. Parameters ---------- diff --git a/csxtools/helpers/fastccd.py b/csxtools/helpers/fastccd.py index 21fcace..30277ca 100644 --- a/csxtools/helpers/fastccd.py +++ b/csxtools/helpers/fastccd.py @@ -138,7 +138,7 @@ def get_dark_near( # print( darks_possible ) try: dark = int(darks_possible.sort_values(by="delta_time").reset_index()["scan"][0]) - except: + except: # noqa: E722 dark = None return None @@ -240,12 +240,12 @@ def get_fastccd_pixel_readout(header): config = header.descriptors[0]["configuration"]["fccd"]["data"] try: overscan_cols = config["fccd_cam_overscan_cols"] # this is hardware config - except: + except: # noqa: E722 overscan_cols = "unknown" # can code using tiled to infer by Xarray shape; test setting to None try: rows = config["fccd_fccd1_rows"] row_offset = config["fccd_fccd1_row_offset"] - except: + except: # noqa: E722 rows = ( "unknown" # need to rely on hardcoded concatenation ; test setting to None ) diff --git a/csxtools/image_corr.py b/csxtools/image_corr.py index 4c70c32..0058609 100644 --- a/csxtools/image_corr.py +++ b/csxtools/image_corr.py @@ -24,7 +24,7 @@ def correct_events(evs, data_key, dark_images, drop_raw=False): "data": dict(ev["data"]), "timestamps": dict(ev["timestamps"]), } - # TODO: replace stub with actual subtract_background implementation + # TODO (nisarnk): replace stub with actual subtract_background implementation corr, gain_img = subtract_background( # noqa F821 ev["data"][data_key], dark_images ) diff --git a/setup.py b/setup.py index 8991a9f..8513a5a 100644 --- a/setup.py +++ b/setup.py @@ -79,7 +79,8 @@ def get_ext_filename(self, ext_name): "build_ext": CustomBuildExt, }, author="Brookhaven National Laboratory", - 
description="Python library for tools to be used at the Coherent Soft X-ray scattering (CSX) beamline at NSLS-II.", + description="""Python library for tools to be used at the Coherent Soft X-ray scattering (CSX) + beamline at NSLS-II.""", packages=setuptools.find_packages(exclude=["src", "tests"]), python_requires=">={}".format(".".join(str(n) for n in min_version)), long_description=readme, From f6ce50c334360e0179ac5c5d0c67613bfdd8c55e Mon Sep 17 00:00:00 2001 From: nisarnk <78008867+nisarnk@users.noreply.github.com> Date: Tue, 1 Jul 2025 10:51:40 -0400 Subject: [PATCH 47/48] Update README.md Remove Travis CI badge (inactive) and replace with GitHub Actions test badge --- README.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/README.md b/README.md index 8e3283f..648d23c 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,6 @@ CSX Data Analysis Tools ======================= - - -[![Build Status](https://travis-ci.org/NSLS-II-CSX/csxtools.svg?branch=master)](https://travis-ci.org/NSLS-II-CSX/csxtools) [![Unit Tests](https://github.com/NSLS-II-CSX/csxtools/actions/workflows/tests.yml/badge.svg)](https://github.com/NSLS-II-CSX/csxtools/actions/workflows/tests.yml) [![Coverage Status](https://coveralls.io/repos/NSLS-II-CSX/csxtools/badge.svg?branch=master&service=github)](https://coveralls.io/github/NSLS-II-CSX/csxtools?branch=master) From 726e12e8cf6fa6cecc58f1fba01c592538c970ab Mon Sep 17 00:00:00 2001 From: nisarnk <78008867+nisarnk@users.noreply.github.com> Date: Tue, 1 Jul 2025 11:01:55 -0400 Subject: [PATCH 48/48] Update README.md --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 648d23c..66ea98b 100644 --- a/README.md +++ b/README.md @@ -2,8 +2,10 @@ CSX Data Analysis Tools ======================= [![Unit Tests](https://github.com/NSLS-II-CSX/csxtools/actions/workflows/tests.yml/badge.svg)](https://github.com/NSLS-II-CSX/csxtools/actions/workflows/tests.yml) -[![Coverage 
Status](https://coveralls.io/repos/NSLS-II-CSX/csxtools/badge.svg?branch=master&service=github)](https://coveralls.io/github/NSLS-II-CSX/csxtools?branch=master) - +[![codecov](https://codecov.io/gh/NSLS-II-CSX/csxtools/branch/master/graph/badge.svg)](https://codecov.io/gh/NSLS-II-CSX/csxtools) +[![PyPI version](https://badge.fury.io/py/csxtools.svg)](https://badge.fury.io/py/csxtools) +[![License: BSD-3-Clause](https://img.shields.io/badge/License-BSD%203--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause) +[![Downloads](https://pepy.tech/badge/csxtools)](https://pepy.tech/project/csxtools) Python library for tools to be used at the Coherent Soft X-ray scattering beamline at NSLS-II, (CSX, 23-ID)