diff --git a/.flake8 b/.flake8
index 44c45e7..f9b52a9 100644
--- a/.flake8
+++ b/.flake8
@@ -5,5 +5,8 @@ exclude =
build,
dist,
versioneer.py,
- doc/conf.py
-max-line-length = 115
+ csxtools/doc/conf.py
+ *.ipynb_checkpoints,
+
+max-line-length = 140
+ignore = E203, W503
\ No newline at end of file
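
Reviewer note on the new `ignore` list: E203 ("whitespace before ':'") and W503 ("line break before binary operator") are exactly the two pycodestyle warnings that black-formatted code trips, and the rest of this PR is a black pass, so the pair belongs together. A tiny self-contained illustration (names are made up):

```python
# E203: black puts a space before ":" in computed slices,
# as in the reformatted versioneer code later in this diff.
dirname = "csxtools-0.1.12"
version = dirname[len("csxtools-") :]

# W503: black breaks long conditions *before* the operator.
ok = (len(version) > 0
      and version[0].isdigit())
```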
diff --git a/.github/workflows/_test-in-conda-env.yml b/.github/workflows/_test-in-conda-env.yml
index 5ebfbcf..a2e2c40 100644
--- a/.github/workflows/_test-in-conda-env.yml
+++ b/.github/workflows/_test-in-conda-env.yml
@@ -61,6 +61,11 @@ jobs:
- name: Check out the code repo
uses: actions/checkout@v4
+      - name: "Workaround: Fix .condarc MultipleKeysError"
+ run: |
+ sed -i '/auto_activate_base/d' /home/runner/.condarc || true
+ sed -i '/auto_activate:/d' /home/runner/.condarc || true
+
- name: Set up Python ${{ inputs.python-version }} with conda
uses: conda-incubator/setup-miniconda@v3
with:
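
Reviewer note: this sed pair presumably works around recent conda releases renaming `auto_activate_base` to `auto_activate`; runner images whose `~/.condarc` ends up carrying both spellings make every `conda` call fail with `MultipleKeysError`. Deleting both lines (with `|| true` so a missing file cannot fail the step) lets the setup-miniconda step below write a clean config.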
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index a1db53a..4a984a3 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -9,7 +9,7 @@ jobs:
strategy:
matrix:
- python-version: ["3.8", "3.9", "3.10"]
+ python-version: ["3.9", "3.10", "3.11", "3.12"]
fail-fast: false
steps:
@@ -17,7 +17,7 @@ jobs:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
@@ -39,7 +39,7 @@ jobs:
coverage xml
- name: Upload coverage to Codecov
- uses: codecov/codecov-action@v2
+ uses: codecov/codecov-action@v4
with:
file: ./coverage.xml
flags: unittests
diff --git a/MANIFEST.in b/MANIFEST.in
index c31f33d..d8dd7d0 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,7 +1,20 @@
+# Metadata and versioning
+include README.md
+include LICENSE.txt
+include versioneer.py
+include csxtools/_version.py
+
+# Requirements
include requirements.txt
include requirements-extras.txt
-include versioneer.py
-include csxtools/_version.py
-recursive-include src * *.[hc]
+# Source code
+recursive-include csxtools *.py *.so
+
+# C sources and headers
+recursive-include src *.c *.h
+
+# Documentation and notebooks
+recursive-include doc *
+recursive-include examples *.ipynb
diff --git a/README.md b/README.md
index 238f28b..66ea98b 100644
--- a/README.md
+++ b/README.md
@@ -1,11 +1,11 @@
CSX Data Analysis Tools
=======================
-
-
-[![Build Status](https://travis-ci.org/NSLS-II-CSX/csxtools.svg?branch=master)](https://travis-ci.org/NSLS-II-CSX/csxtools)
-[![Coverage Status](https://coveralls.io/repos/github/NSLS-II-CSX/csxtools/badge.svg?branch=master)](https://coveralls.io/github/NSLS-II-CSX/csxtools?branch=master)
-[![Code Health](https://landscape.io/github/NSLS-II-CSX/csxtools/master/landscape.svg?style=flat)](https://landscape.io/github/NSLS-II-CSX/csxtools/master)
+[![Tests](https://github.com/NSLS-II-CSX/csxtools/actions/workflows/tests.yml/badge.svg)](https://github.com/NSLS-II-CSX/csxtools/actions/workflows/tests.yml)
+[![codecov](https://codecov.io/gh/NSLS-II-CSX/csxtools/branch/master/graph/badge.svg)](https://codecov.io/gh/NSLS-II-CSX/csxtools)
+[![PyPI version](https://badge.fury.io/py/csxtools.svg)](https://badge.fury.io/py/csxtools)
+[![License](https://img.shields.io/badge/License-BSD%203--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause)
+[![Downloads](https://pepy.tech/badge/csxtools)](https://pepy.tech/project/csxtools)
Python library for tools to be used at the Coherent Soft X-ray scattering
beamline at NSLS-II (CSX, 23-ID)
diff --git a/csxtools/__init__.py b/csxtools/__init__.py
index b41be8c..453d857 100644
--- a/csxtools/__init__.py
+++ b/csxtools/__init__.py
@@ -1,9 +1,17 @@
# Now import useful functions
-from .utils import (get_fastccd_images, get_fastccd_timestamps) # noqa F401
+from .utils import get_fastccd_images # noqa: F401
+from .utils import get_fastccd_flatfield # noqa: F401
+from .utils import get_fastccd_timestamps # noqa: F401
+
+from .utils import get_axis_images # noqa: F401
+from .utils import get_axis_flatfield # noqa: F401
+from .utils import get_axis_timestamps # noqa: F401
+
from .plotting import make_panel_plot # noqa F401
# set version string using versioneer
from ._version import get_versions
-__version__ = get_versions()['version']
+
+__version__ = get_versions()["version"]
del get_versions
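
Reviewer note: with the re-exports above, the new AXIS helpers sit next to the FastCCD ones at the package top level. A quick smoke-test sketch (assuming an installed build):

```python
import csxtools
from csxtools import get_fastccd_images, get_axis_images  # AXIS twin is new

print(csxtools.__version__)  # versioneer-derived version string
```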
diff --git a/csxtools/_version.py b/csxtools/_version.py
index a33ea8e..2e6a0b3 100644
--- a/csxtools/_version.py
+++ b/csxtools/_version.py
@@ -1,4 +1,3 @@
-
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
@@ -57,6 +56,7 @@ def decorate(f):
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
+
return decorate
@@ -67,9 +67,12 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
- p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
- stderr=(subprocess.PIPE if hide_stderr
- else None))
+ p = subprocess.Popen(
+ [c] + args,
+ cwd=cwd,
+ stdout=subprocess.PIPE,
+ stderr=(subprocess.PIPE if hide_stderr else None),
+ )
break
except EnvironmentError:
e = sys.exc_info()[1]
@@ -99,12 +102,17 @@ def versions_from_parentdir(parentdir_prefix, root, verbose):
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
- print("guessing rootdir is '%s', but '%s' doesn't start with "
- "prefix '%s'" % (root, dirname, parentdir_prefix))
+ print(
+ "guessing rootdir is '%s', but '%s' doesn't start with "
+ "prefix '%s'" % (root, dirname, parentdir_prefix)
+ )
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
- return {"version": dirname[len(parentdir_prefix):],
- "full-revisionid": None,
- "dirty": False, "error": None}
+ return {
+ "version": dirname[len(parentdir_prefix) :],
+ "full-revisionid": None,
+ "dirty": False,
+ "error": None,
+ }
@register_vcs_handler("git", "get_keywords")
@@ -144,7 +152,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
- tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
+ tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
@@ -153,27 +161,32 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
- tags = set([r for r in refs if re.search(r'\d', r)])
+ tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
- print("discarding '%s', no digits" % ",".join(refs-tags))
+ print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
- r = ref[len(tag_prefix):]
+ r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
- return {"version": r,
- "full-revisionid": keywords["full"].strip(),
- "dirty": False, "error": None
- }
+ return {
+ "version": r,
+ "full-revisionid": keywords["full"].strip(),
+ "dirty": False,
+ "error": None,
+ }
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
- return {"version": "0+unknown",
- "full-revisionid": keywords["full"].strip(),
- "dirty": False, "error": "no suitable tags"}
+ return {
+ "version": "0+unknown",
+ "full-revisionid": keywords["full"].strip(),
+ "dirty": False,
+ "error": "no suitable tags",
+ }
@register_vcs_handler("git", "pieces_from_vcs")
@@ -193,9 +206,9 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
- describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
- "--always", "--long"],
- cwd=root)
+ describe_out = run_command(
+ GITS, ["describe", "--tags", "--dirty", "--always", "--long"], cwd=root
+ )
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
@@ -218,17 +231,16 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
- git_describe = git_describe[:git_describe.rindex("-dirty")]
+ git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
- mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
+ mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
- pieces["error"] = ("unable to parse git-describe output: '%s'"
- % describe_out)
+ pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
@@ -237,10 +249,12 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
- pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
- % (full_tag, tag_prefix))
+ pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
+ full_tag,
+ tag_prefix,
+ )
return pieces
- pieces["closest-tag"] = full_tag[len(tag_prefix):]
+ pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
@@ -251,8 +265,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
else:
# HEX: no tags
pieces["closest-tag"] = None
- count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
- cwd=root)
+ count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
@@ -281,8 +294,7 @@ def render_pep440(pieces):
rendered += ".dirty"
else:
# exception #1
- rendered = "0+untagged.%d.g%s" % (pieces["distance"],
- pieces["short"])
+ rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
@@ -389,10 +401,12 @@ def render_git_describe_long(pieces):
def render(pieces, style):
if pieces["error"]:
- return {"version": "unknown",
- "full-revisionid": pieces.get("long"),
- "dirty": None,
- "error": pieces["error"]}
+ return {
+ "version": "unknown",
+ "full-revisionid": pieces.get("long"),
+ "dirty": None,
+ "error": pieces["error"],
+ }
if not style or style == "default":
style = "pep440" # the default
@@ -412,8 +426,12 @@ def render(pieces, style):
else:
raise ValueError("unknown style '%s'" % style)
- return {"version": rendered, "full-revisionid": pieces["long"],
- "dirty": pieces["dirty"], "error": None}
+ return {
+ "version": rendered,
+ "full-revisionid": pieces["long"],
+ "dirty": pieces["dirty"],
+ "error": None,
+ }
def get_versions():
@@ -426,8 +444,7 @@ def get_versions():
verbose = cfg.verbose
try:
- return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
- verbose)
+ return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
@@ -436,12 +453,15 @@ def get_versions():
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
- for i in cfg.versionfile_source.split('/'):
+ for i in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
- return {"version": "0+unknown", "full-revisionid": None,
- "dirty": None,
- "error": "unable to find root of source tree"}
+ return {
+ "version": "0+unknown",
+ "full-revisionid": None,
+ "dirty": None,
+ "error": "unable to find root of source tree",
+ }
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
@@ -455,6 +475,9 @@ def get_versions():
except NotThisMethod:
pass
- return {"version": "0+unknown", "full-revisionid": None,
- "dirty": None,
- "error": "unable to compute version"}
+ return {
+ "version": "0+unknown",
+ "full-revisionid": None,
+ "dirty": None,
+ "error": "unable to compute version",
+ }
diff --git a/csxtools/axis1/__init__.py b/csxtools/axis1/__init__.py
new file mode 100644
index 0000000..d55c772
--- /dev/null
+++ b/csxtools/axis1/__init__.py
@@ -0,0 +1,9 @@
+from .images import correct_images_axis
+
+__all__ = ["correct_images_axis"]
+
+# set version string using versioneer
+from .._version import get_versions
+
+__version__ = get_versions()["version"]
+del get_versions
diff --git a/csxtools/axis1/images.py b/csxtools/axis1/images.py
new file mode 100644
index 0000000..a2536b7
--- /dev/null
+++ b/csxtools/axis1/images.py
@@ -0,0 +1,52 @@
+import numpy as np
+from ..ext import axis1
+import time as ttime
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+def correct_images_axis(images, dark=None, flat=None):
+ """Subtract background and correct images
+
+ This routine subtracts the background and corrects the images
+ for AXIS1 detector.
+
+ Parameters
+ ----------
+ images : array_like
+ Input array of images to correct of shape (N, y, x) where N is the
+ number of images and x and y are the image size.
+ dark : array_like, optional
+ Input array of dark images. This should be of shape (y, x)
+ flat : array_like, optional
+ Input array for the flatfield correction. This should be of shape
+ (y, x)
+
+ Returns
+ -------
+ array_like
+ Array of corrected images of shape (N, y, x)
+
+ """
+
+ t = ttime.time()
+
+ logger.info("Correcting image stack of shape %s", images.shape)
+
+ if dark is None:
+ dark = np.zeros(images.shape[-2:], dtype=np.float32)
+ logger.info("Not correcting for darkfield. No input.")
+ if flat is None:
+ flat = np.ones(images.shape[-2:], dtype=np.float32)
+ logger.info("Not correcting for flatfield. No input.")
+ else:
+ flat = np.asarray(flat, dtype=np.float32)
+
+ data = axis1.correct_images_axis(images.astype(np.uint16), dark, flat)
+ t = ttime.time() - t
+
+ logger.info("Corrected image stack in %.3f seconds", t)
+
+ return data
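
Reviewer note: a minimal usage sketch for the new entry point; shapes and values are hypothetical, and the heavy lifting happens in the `csxtools.ext.axis1` C extension:

```python
import numpy as np
from csxtools.axis1 import correct_images_axis

# ten fake raw AXIS frames of 64x64 pixels
raw = np.random.randint(0, 4096, size=(10, 64, 64), dtype=np.uint16)
dark = np.full((64, 64), 100.0, dtype=np.float32)

corrected = correct_images_axis(raw, dark=dark)  # flat defaults to all ones
print(corrected.shape)  # (10, 64, 64)
```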
diff --git a/csxtools/fastccd/__init__.py b/csxtools/fastccd/__init__.py
index 0eed326..94d28ce 100644
--- a/csxtools/fastccd/__init__.py
+++ b/csxtools/fastccd/__init__.py
@@ -1,9 +1,10 @@
from .images import correct_images
from .phocount import photon_count
-__all__ = ['correct_images', 'photon_count']
+__all__ = ["correct_images", "photon_count"]
# set version string using versioneer
from .._version import get_versions
-__version__ = get_versions()['version']
+
+__version__ = get_versions()["version"]
del get_versions
diff --git a/csxtools/fastccd/images.py b/csxtools/fastccd/images.py
index 59296b6..7275bf7 100644
--- a/csxtools/fastccd/images.py
+++ b/csxtools/fastccd/images.py
@@ -3,6 +3,7 @@
import time as ttime
import logging
+
logger = logging.getLogger(__name__)
@@ -14,7 +15,7 @@ def correct_images(images, dark=None, flat=None, gain=(1, 4, 8)):
Parameters
----------
- in : array_like
+ images : array_like
Input array of images to correct of shape (N, y, x) where N is the
number of images and x and y are the image size.
dark : array_like, optional
@@ -49,8 +50,7 @@ def correct_images(images, dark=None, flat=None, gain=(1, 4, 8)):
else:
flat = np.asarray(flat, dtype=np.float32)
- data = fastccd.correct_images(images.astype(np.uint16),
- dark, flat, gain)
+ data = fastccd.correct_images(images.astype(np.uint16), dark, flat, gain)
t = ttime.time() - t
logger.info("Corrected image stack in %.3f seconds", t)
diff --git a/csxtools/helpers/__init__.py b/csxtools/helpers/__init__.py
index f669ba0..ea162ca 100644
--- a/csxtools/helpers/__init__.py
+++ b/csxtools/helpers/__init__.py
@@ -1,9 +1,26 @@
-from .fastccd import (get_dark_near, get_dark_near_all, get_fastccd_roi, get_fastccd_exp, get_fastccd_images_sized, convert_photons)
-from .overscan import (get_os_correction_images, get_os_dropped_images)
+from .fastccd import (
+ get_dark_near,
+ get_dark_near_all,
+ get_fastccd_roi,
+ get_fastccd_exp,
+ get_fastccd_images_sized,
+ convert_photons,
+)
+from .overscan import get_os_correction_images, get_os_dropped_images
-__all__ = ['get_dark_near', 'get_dark_near_all', 'get_fastccd_roi', 'get_fastccd_exp', 'get_fastccd_images_sized', 'convert_photons', 'get_os_correction_images', 'get_os_dropped_images']
+__all__ = [
+ "get_dark_near",
+ "get_dark_near_all",
+ "get_fastccd_roi",
+ "get_fastccd_exp",
+ "get_fastccd_images_sized",
+ "convert_photons",
+ "get_os_correction_images",
+ "get_os_dropped_images",
+]
# set version string using versioneer
from .._version import get_versions
-__version__ = get_versions()['version']
+
+__version__ = get_versions()["version"]
del get_versions
diff --git a/csxtools/helpers/fastccd.py b/csxtools/helpers/fastccd.py
index 3ca8d54..30277ca 100644
--- a/csxtools/helpers/fastccd.py
+++ b/csxtools/helpers/fastccd.py
@@ -1,94 +1,144 @@
+import logging
+import numpy as np
import pandas
from collections import namedtuple
-import numpy as np
-from csxtools.image import rotate90, stackmean
-from csxtools.utils import calculate_flatfield, get_images_to_3D, get_fastccd_images, get_images_to_4D
+from ipywidgets import (
+ interact,
+) # TODO move this and general utility to different module later
+
+from csxtools.utils import get_fastccd_images, get_images_to_4D
from csxtools.helpers.overscan import get_os_correction_images, get_os_dropped_images
-import logging
logger = logging.getLogger(__name__)
-from ipywidgets import interact #TODO move this and general untility to different module later (like movie making)
-
-def browse_3Darray(res,title='Frame'):#, extra_scalar_dict=None):
- """ Widget for notebooks. Sliding bar to browse 3D python array. Must plot using subplots method with 1 axes.
+def browse_3Darray(res, title="Frame"): # , extra_scalar_dict=None):
+ """Widget for notebooks. Sliding bar to browse 3D python array. Must plot using subplots method with 1 axes.
    res : 3D array with the first element being iterated
-
+
title : string to be the title of the plot
- """
+ """
N = len(res)
+
def view_image(i=0):
im.set_data(res[i])
- #if extra_scalar_dict is not None:
+ # if extra_scalar_dict is not None:
# key = extra_scalr_dict.keys()[0]
# values = extra_scalar_dict.values()
-
- #if extra_scalar_dict is None:
+
+ # if extra_scalar_dict is None:
# ax.set_title(f'{title} {i} {key} {values[i]}')
- #else:
- ax.set_title(f'{title} {i}')
+ # else:
+ ax.set_title(f"{title} {i}")
fig.canvas.draw_idle()
- interact(view_image, i=(0, N-1))
-
-
-#### FCCD specific stuff starts here
-def find_possible_darks(header, dark_gain, search_time, return_debug_info,exposure_time_tolerance = 0.002, db=None):
- darks_possible ={'scan':[],'exp_time':[], 'delta_time':[] }
+ interact(view_image, i=(0, N - 1))
+
+
+# FCCD specific stuff starts here
+
+
+def find_possible_darks(
+ header,
+ dark_gain,
+ search_time,
+ return_debug_info,
+ exposure_time_tolerance=0.002,
+ db=None,
+):
+ darks_possible = {"scan": [], "exp_time": [], "delta_time": []}
start_time = header.start["time"]
stop_time = header.stop["time"]
- if header.stop["exit_status"] != 'abort': #because the key is missing from descriptors, was never recorded
- #try:
- exp_time = header.descriptors[0]['configuration']['fccd']['data']['fccd_cam_acquire_time']
- #except:
- #print(header.start["scan_id"])
- #raise
-
-
- hhs = db(since = start_time - search_time, until = start_time, **{'fccd.image': 'dark'}, **{'fccd.gain': dark_gain})
- data = [[h.start["scan_id"], h.descriptors[0]['configuration']['fccd']['data']['fccd_cam_acquire_time'],
- start_time-h.start['time']] for h in hhs if getattr(h, 'stop', {}).get('exit_status', 'not done') == 'success']
-
- hhs = db(since = stop_time, until = stop_time + search_time, **{'fccd.image': 'dark'}, **{'fccd.gain': dark_gain})
- data.extend( [[h.start["scan_id"], h.descriptors[0]['configuration']['fccd']['data']['fccd_cam_acquire_time'],
- h.stop['time']-stop_time] for h in hhs if getattr(h, 'stop', {}).get('exit_status', 'not done') == 'success'])
- data=np.array(data)
- #print(data)
- for i,k in enumerate(darks_possible.keys()):
+ if (
+ header.stop["exit_status"] != "abort"
+ ): # because the key is missing from descriptors, was never recorded
+ # try:
+ exp_time = header.descriptors[0]["configuration"]["fccd"]["data"][
+ "fccd_cam_acquire_time"
+ ]
+ # except:
+ # print(header.start["scan_id"])
+ # raise
+
+ hhs = db(
+ since=start_time - search_time,
+ until=start_time,
+ **{"fccd.image": "dark"},
+ **{"fccd.gain": dark_gain},
+ )
+ data = [
+ [
+ h.start["scan_id"],
+ h.descriptors[0]["configuration"]["fccd"]["data"]["fccd_cam_acquire_time"],
+ start_time - h.start["time"],
+ ]
+ for h in hhs
+ if getattr(h, "stop", {}).get("exit_status", "not done") == "success"
+ ]
+
+ hhs = db(
+ since=stop_time,
+ until=stop_time + search_time,
+ **{"fccd.image": "dark"},
+ **{"fccd.gain": dark_gain},
+ )
+ data.extend(
+ [
+ [
+ h.start["scan_id"],
+ h.descriptors[0]["configuration"]["fccd"]["data"][
+ "fccd_cam_acquire_time"
+ ],
+ h.stop["time"] - stop_time,
+ ]
+ for h in hhs
+ if getattr(h, "stop", {}).get("exit_status", "not done") == "success"
+ ]
+ )
+ data = np.array(data)
+ # print(data)
+ for i, k in enumerate(darks_possible.keys()):
try:
- darks_possible[k] = data[:,i]
+ darks_possible[k] = data[:, i]
except IndexError:
darks_possible[k] = None
return darks_possible
-
+
darks_possible = pandas.DataFrame(darks_possible)
- #clean up if exposure times are not within exp_time_tolerance seconds
- darks_possible = darks_possible[darks_possible['exp_time'].apply(np.isclose, b=exp_time, atol=exposure_time_tolerance) == True]
+ # clean up if exposure times are not within exp_time_tolerance seconds
+ darks_possible = darks_possible[
+ darks_possible["exp_time"].apply(
+ np.isclose, b=exp_time, atol=exposure_time_tolerance
+ )
+ ]
-
return darks_possible
-def get_dark_near(header, dark_gain = 'auto', search_time=30*60, return_debug_info = False, db=None):
- """ Find and extract the most relevant dark image (relevant in time and gain setting) for a given scan.
+
+def get_dark_near(
+ header, dark_gain="auto", search_time=30 * 60, return_debug_info=False, db=None
+):
+ """Find and extract the most relevant dark image (relevant in time and gain setting) for a given scan.
    header : databroker header of bluesky scan
- dark_gain : string
+ dark_gain : string
match dark gain settings as described in the start document ('auto', 'x2', 'x1')
- search_time : int or float
+ search_time : int or float
time in seconds before (after) the start (stop) document timestamps
-
+
db : Broker.name("csx") is expected. Use databroker v1 or v2 or a wrapped tiled catalog
"""
-
- darks_possible = find_possible_darks(header, dark_gain, search_time, return_debug_info, db=db)
- #print( darks_possible )
+
+ darks_possible = find_possible_darks(
+ header, dark_gain, search_time, return_debug_info, db=db
+ )
+ # print( darks_possible )
try:
- dark = int(darks_possible.sort_values(by='delta_time').reset_index()['scan'][0])
- except:
+ dark = int(darks_possible.sort_values(by="delta_time").reset_index()["scan"][0])
+ except: # noqa: E722
dark = None
return None
@@ -97,9 +147,13 @@ def get_dark_near(header, dark_gain = 'auto', search_time=30*60, return_debug_in
else:
return db[dark]
+
def get_dark_near_all(header, db=None, **kwargs):
- d8,d2,d1 = (get_dark_near(header,dark_gain= dg, db=db, **kwargs) for dg in ['auto','x2','x1'])
- return d8,d2,d1
+ d8, d2, d1 = (
+ get_dark_near(header, dark_gain=dg, db=db, **kwargs)
+ for dg in ["auto", "x2", "x1"]
+ )
+ return d8, d2, d1
def get_fastccd_roi(header, roi_number):
@@ -114,92 +168,108 @@ def get_fastccd_roi(header, roi_number):
-------
named tuple
start_x : int, horizontal starting pixel from left (using output of get_fastccd_images())
- size_x : int, horizontal bin size for ROI
+ size_x : int, horizontal bin size for ROI
start_y : int, vertical starting pixel from top (using output of get_fastccd_images())
size_y : int, vertical bin size for ROI
name : string, name assigned by user in ROI (optional)
-
+
"""
- config = header.descriptors[0]['configuration']['fccd']['data']
- if config == {}: #prior to mid 2017
+ config = header.descriptors[0]["configuration"]["fccd"]["data"]
+ if config == {}: # prior to mid 2017
-        x_start, x_size, y_start, y_size = None
+        x_start = x_size = y_start = y_size = name = None
- logger.warning('Meta data does not exist.')
- #elif config[f'fccd_stats{roi_number}_compute_statistics'] == 'Yes':
+ logger.warning("Meta data does not exist.")
+ # elif config[f'fccd_stats{roi_number}_compute_statistics'] == 'Yes':
else:
- x_start = config[f'fccd_roi{roi_number}_min_xyz_min_x']
- x_size = config[f'fccd_roi{roi_number}_size_x']
- y_start = config[f'fccd_roi{roi_number}_min_xyz_min_y']
- y_size = config[f'fccd_roi{roi_number}_size_y']
- name = config[f'fccd_roi{roi_number}_name_']
-
-
- FCCDroi = namedtuple('FCCDroi', ['start_x', 'size_x', 'start_y', 'size_y', 'name'])
+ x_start = config[f"fccd_roi{roi_number}_min_xyz_min_x"]
+ x_size = config[f"fccd_roi{roi_number}_size_x"]
+ y_start = config[f"fccd_roi{roi_number}_min_xyz_min_y"]
+ y_size = config[f"fccd_roi{roi_number}_size_y"]
+ name = config[f"fccd_roi{roi_number}_name_"]
+
+ FCCDroi = namedtuple("FCCDroi", ["start_x", "size_x", "start_y", "size_y", "name"])
return FCCDroi(x_start, x_size, y_start, y_size, name)
+
def get_fastccd_exp(header):
- """Returns named tuple of exposure time, exposure period and number of images per "point" for a databroker header.
+ """Returns named tuple of exposure time, exposure period and number of images per "point" for a databroker header.
Parameters
----------
header : databroker header
-
+
Returns
-------
named tuple
exp_time : float, exposure time (photon integration) of each image in seconds
- exp_period : float, exposure period time in seconds. the time between consecutive frames for a single "point".
+ exp_period : float, exposure period time in seconds. the time between consecutive frames for a single "point".
Most often used to convert XPCS lag_step (or delays) to "time" from "frames"
num_images : int, number of images per "point".
-
+
"""
- config = header.descriptors[0]['configuration']['fccd']['data']
- if config == {}: #prior to mid 2017
- ## this is done because of deprecated gs.DETS and replaced by descriptors. i don't know if db v2 and tiled even handle this okay.
- ## when we delete data from 2017 we can just delete this part of the code
- exp_t = header.table().get('fccd_acquire_time')[1]
- exp_p = header.table().get('fccd_acquire_period')[1]
- exp_im = header.table().get('fccd_num_images')[1]
- else: #After mid 2017
- exp_t = config['fccd_cam_acquire_time']
- exp_p = config['fccd_cam_acquire_period']
- exp_im = config['fccd_cam_num_images']
-
- FCCDexp = namedtuple('FCCDexposure_config', ['exp_time' , 'exp_period', 'num_images'])
+ config = header.descriptors[0]["configuration"]["fccd"]["data"]
+ if config == {}: # prior to mid 2017
+ # this is done because of deprecated gs.DETS and replaced by descriptors. i don't know if db v2 and tiled even handle this okay.
+ # when we delete data from 2017 we can just delete this part of the code
+ exp_t = header.table().get("fccd_acquire_time")[1]
+ exp_p = header.table().get("fccd_acquire_period")[1]
+ exp_im = header.table().get("fccd_num_images")[1]
+ else: # After mid 2017
+ exp_t = config["fccd_cam_acquire_time"]
+ exp_p = config["fccd_cam_acquire_period"]
+ exp_im = config["fccd_cam_num_images"]
+
+ FCCDexp = namedtuple(
+ "FCCDexposure_config", ["exp_time", "exp_period", "num_images"]
+ )
return FCCDexp(exp_t, exp_p, exp_im)
+
def get_fastccd_pixel_readout(header):
- """Returns named tuple of details needed to properly concatenate the fccd images.
+ """Returns named tuple of details needed to properly concatenate the fccd images.
Parameters
----------
header : databroker header
-
+
Returns
-------
named tuple
        overscan_cols : int, configured by timing file for the number of virtual columns for dark current noise
        rows : int, number of rows for framestore versus non-framestore mode, as implemented by the FCCD plugin for EPICS AreaDetector
        row_offset : int, unused virtual pixels to be removed, as implemented by the FCCD plugin for EPICS AreaDetector
-
+
"""
- config = header.descriptors[0]['configuration']['fccd']['data']
+ config = header.descriptors[0]["configuration"]["fccd"]["data"]
try:
- overscan_cols = config['fccd_cam_overscan_cols'] #this is hardware config
- except:
- overscan_cols = 'unknown' #can code using tiled to infer by Xarray shape; test setting to None
+ overscan_cols = config["fccd_cam_overscan_cols"] # this is hardware config
+ except: # noqa: E722
+ overscan_cols = "unknown" # can code using tiled to infer by Xarray shape; test setting to None
try:
- rows = config['fccd_fccd1_rows']
- row_offset = config['fccd_fccd1_row_offset']
- except:
- rows = 'unknown' ##need to rely on hardcoded concatenation ; test setting to None
- row_offset = 'unknown' ##need to rely on hardcoded concatenation ; test setting to None
-
- FCCDconcat = namedtuple('FCCDconcat', ['overscan_cols' , 'rows', 'row_offset'])
+ rows = config["fccd_fccd1_rows"]
+ row_offset = config["fccd_fccd1_row_offset"]
+ except: # noqa: E722
+ rows = (
+ "unknown" # need to rely on hardcoded concatenation ; test setting to None
+ )
+ row_offset = (
+ "unknown" # need to rely on hardcoded concatenation ; test setting to None
+ )
+
+ FCCDconcat = namedtuple("FCCDconcat", ["overscan_cols", "rows", "row_offset"])
return FCCDconcat(overscan_cols, rows, row_offset)
-def get_fastccd_images_sized(header, dark_headers=None, flat=None, auto_concat = True, auto_overscan=True, return_overscan_array = False, drop_overscan=True):
+
+def get_fastccd_images_sized(
+ header,
+ dark_headers=None,
+ flat=None,
+ auto_concat=True,
+ auto_overscan=True,
+ return_overscan_array=False,
+ drop_overscan=True,
+):
"""Normalazied images with proper concatenation and overscan data by calling get_fastccd_images
Parameters
----------
- light_header : databorker header
+    header : databroker header
dark_headers : tuple of 3 databroker headers , optional
These headers are the dark images. The tuple should be formed
@@ -209,153 +279,198 @@ def get_fastccd_images_sized(header, dark_headers=None, flat=None, auto_concat =
flat : array_like
Array to use for the flatfield correction. This should be a 2D
- array sized as the last two dimensions of the image stack.
+ array sized as the last two dimensions of the image stack.
See csxtools.utilities.get_flatfield() and use plan_name count_flatfield.
-
+
auto_concat : Boolean
True to remove un-needed vertical pixels
-
+
auto_overscan : Boolean
True to correct images with overscan data and remove overscan data
from the array
-
+
return_overscan_array : Boolean
        False to not return the overscan data as a separate array (broadcastable)
drop_overscan: Boolean
- If auto_overscan False, choose to keep or drop the overscan data from
+ If auto_overscan False, choose to keep or drop the overscan data from
the returned data images
-
-
-
+
+
+
Returns
-------
- images : 4D array (points, frames-per-point, Vpixels, Hpixels)
+ images : 4D array (points, frames-per-point, Vpixels, Hpixels)
Normalized fastccd data.
overscan_data : OPTIONAL 4D array (points, frames-per-point, Vpixel, Hpixels)
- Extracted overscan data (2 Vpixels for ever 10 Vpixels).
+        Extracted overscan data (2 Vpixels for every 10 Vpixels).
auto_concat_performed : Boolean
-
+
auto_os_drop_performed : Boolean
-
+
auto_os_correct_performed : Boolean
-
+
"""
-
-
- #print('Processing scan {}'.format(header['start']['scan_id']))
+
+ # print('Processing scan {}'.format(header['start']['scan_id']))
images = get_fastccd_images(header, dark_headers, flat=flat)
- ###TODO write if statement for image shape if the output is an array (future csxtools upgrade), then there is no need for next 2 lines
+ # TODO write if statement for image shape if the output is an array (future csxtools upgrade), then there is no need for next 2 lines
stack = get_images_to_4D(images)
images = stack
- total_rows = images.shape[-1] #TODO add to descriptors for image output saving?, but dan must have it somewhere in the handler.
+ total_rows = images.shape[
+ -1
+ ] # TODO add to descriptors for image output saving?, but dan must have it somewhere in the handler.
fccd_concat_params = get_fastccd_pixel_readout(header)
-
- #### SEE IF OVERSCAN WAS ENABLED
+
+ # SEE IF OVERSCAN WAS ENABLED
if fccd_concat_params.overscan_cols != 2:
images_have_overscan = None
- #TODO future elif to look at shape of data (1132 pix, not 960)
+ # TODO future elif to look at shape of data (1132 pix, not 960)
else:
- images_have_overscan = True #TODO later, go back and add code later to capture the overscan data
-
- ### make FCCD images the correct shape (except for overscan)
+ images_have_overscan = (
+ True # TODO later, go back and add code later to capture the overscan data
+ )
+
+ # make FCCD images the correct shape (except for overscan)
if auto_concat:
- if fccd_concat_params.rows != 'unknown': #goback and change to None when testing
- leftstart = fccd_concat_params.row_offset+1 ##TODO make sure it works for non-framestore (is it 'fccd_cam_image_mode'=2?)
- leftend = fccd_concat_params.rows +fccd_concat_params.row_offset
- rightstart = total_rows - fccd_concat_params.row_offset -fccd_concat_params.rows
+ if (
+ fccd_concat_params.rows != "unknown"
+        ):  # go back and change to None when testing
+ leftstart = (
+ fccd_concat_params.row_offset + 1
+ ) # TODO make sure it works for non-framestore (is it 'fccd_cam_image_mode'=2?)
+ leftend = fccd_concat_params.rows + fccd_concat_params.row_offset
+ rightstart = (
+ total_rows - fccd_concat_params.row_offset - fccd_concat_params.rows
+ )
rightend = total_rows - fccd_concat_params.row_offset + 1
else:
- logging.warning('Concatenating images based on hard-coded values')
- #auto_concat = False ## this seems useless. should do soemthing to return that it was hard-code autoconcat
- if total_rows > 1001: ##because non-framestore
- logging.warning(f'images are larger than 960 pixels (possibly non-FS mode). The first image shape is {images[0,0].shape}')
+ logging.warning("Concatenating images based on hard-coded values")
+ # auto_concat = False ## this seems useless. should do soemthing to return that it was hard-code autoconcat
+ if total_rows > 1001: # because non-framestore
+ logging.warning(
+ f"images are larger than 960 pixels (possibly non-FS mode). The first image shape is {images[0,0].shape}"
+ )
leftstart = 486
leftend = 966
- rightstart = 1034
- rightend = 1514
+ rightstart = 1034
+ rightend = 1514
elif total_rows == 1000:
leftstart = 7
leftend = 486
- rightstart = 514
- rightend = 995
+ rightstart = 514
+ rightend = 995
else:
- logging.warning(f'images are unexpected size for auto-concatenation. The first image shape is {images[0,0].shape}. ')
- auto_concat = False
+ logging.warning(
+ f"images are unexpected size for auto-concatenation. The first image shape is {images[0,0].shape}. "
+ )
+ auto_concat = False
auto_concat_performed = False
if auto_concat:
- print(leftstart, leftend, rightstart, rightend) #TODO add this to verbose warnings level
- images = np.concatenate((images[:,:,:,leftstart : leftend],images[:,:,:, rightstart:rightend]),axis=3)
+ print(
+ leftstart, leftend, rightstart, rightend
+ ) # TODO add this to verbose warnings level
+ images = np.concatenate(
+ (
+ images[:, :, :, leftstart:leftend],
+ images[:, :, :, rightstart:rightend],
+ ),
+ axis=3,
+ )
auto_concat_performed = True
-
- ### if older images, overscan will not be in metadata, but it should be clear from the number of columns (960/10*2)+960=1152
+
+ # if older images, overscan will not be in metadata, but it should be clear from the number of columns (960/10*2)+960=1152
if images.shape[-2] == 1152:
- logging.warning(f'Overscan columns (2 per 10) are detected. {images_have_overscan}')
- #if images_have_overscan == 'unknown':
- logging.warning('Attempting to apply overscan removal')
- images_have_overscan = True ###TODO this means we also have to return this
-
- ### deal with overscan if present
+ logging.warning(
+ f"Overscan columns (2 per 10) are detected. {images_have_overscan}"
+ )
+ # if images_have_overscan == 'unknown':
+ logging.warning("Attempting to apply overscan removal")
+ images_have_overscan = True # TODO this means we also have to return this
+
+ # deal with overscan if present
if auto_overscan and images_have_overscan:
- overscan_data = get_os_correction_images(images) ## this is "broadcastable" with images
- print(overscan_data.shape, 'os data returned in same shape as images should be')
- images = get_os_dropped_images(np.copy(images))
- print(images.shape, 'os dropped and substracting overscan')
+ overscan_data = get_os_correction_images(
+ images
+ ) # this is "broadcastable" with images
+        print(overscan_data.shape, "overscan data returned in the same shape as images")
+ images = get_os_dropped_images(np.copy(images))
+ print(images.shape, "os dropped and substracting overscan")
auto_os_drop_performed = True
images = images - overscan_data
auto_os_correct_performed = True
- elif auto_overscan == False and images_have_overscan and drop_overscan:
- images = get_os_dropped_images(np.copy(images))
- print(images.shape,'only dropping os from images')
+ elif not auto_overscan and images_have_overscan and drop_overscan:
+ images = get_os_dropped_images(np.copy(images))
+ print(images.shape, "only dropping os from images")
auto_os_drop_performed = True
auto_os_correct_performed = False
- elif auto_overscan == False and images_have_overscan and drop_overscan == False:
- print(images.shape,'retaining os in returned data images')
+ elif not auto_overscan and images_have_overscan and not drop_overscan:
+ print(images.shape, "retaining os in returned data images")
auto_os_drop_performed = False
auto_os_correct_performed = False
else:
auto_os_drop_performed = False
auto_os_correct_performed = False
-
+
if return_overscan_array:
- return images, overscan_data, auto_concat_performed, auto_os_drop_performed, auto_os_correct_performed
+ return (
+ images,
+ overscan_data,
+ auto_concat_performed,
+ auto_os_drop_performed,
+ auto_os_correct_performed,
+ )
else:
- return images, auto_concat_performed, auto_os_drop_performed, auto_os_correct_performed
-
+ return (
+ images,
+ auto_concat_performed,
+ auto_os_drop_performed,
+ auto_os_correct_performed,
+ )
+
-def convert_photons(images_input, energy, ADU_930 = 30, quantize_photons = True, make_int_strip_nan= True, round_to_tens=True):
- """Convert ADU to photons based on incident beamline energy. FCCD #2 found to be ~30 ADU fro 930eV (ideally 25 ADU).
+def convert_photons(
+ images_input,
+ energy,
+ ADU_930=30,
+ quantize_photons=True,
+ make_int_strip_nan=True,
+ round_to_tens=True,
+):
+ """Convert ADU to photons based on incident beamline energy. FCCD #2 found to be ~30 ADU fro 930eV (ideally 25 ADU).
Quantized to photons may be problematic in the realm of 4 photon events per pixel. We should add some histogram information.
-
+
Parameters
----------
images_input : numpy array
energy : float, incident photon energy
-
+
quantize_photons : rounds pixel values to one's place. returns float or int based on make_int_strip_nan
make_int_strip_nan : converts rounded pixel values to integers and then NaNs are very near zero
-
+
Returns
-------
images_output : numpy array converted to photons
-
+
#TODO seems to retain nan's need to use a mask to prevent pixels with nan
    #TODO do more testing to make sure rounding is always an appropriate scheme (or at all)
- #TODO it seems that simple rounding creates +/- 4 photon error around "zero" photons
+ #TODO it seems that simple rounding creates +/- 4 photon error around "zero" photons
"""
if round_to_tens:
- ADUpPH = round(ADU_930*np.nanmean(energy)/930, -1) #TODO should be ok and more consistent, but need to check with energyscans,
+ ADUpPH = round(
+ ADU_930 * np.nanmean(energy) / 930, -1
+ ) # TODO should be ok and more consistent, but need to check with energyscans,
else:
- ADUpPH = round(ADU_930*np.nanmean(energy)/930, 2)
+ ADUpPH = round(ADU_930 * np.nanmean(energy) / 930, 2)
images_input = images_input / ADUpPH
- if quantize_photons == True:
- if make_int_strip_nan == True:
- images_output = np.round(images_input).astype('int')
- else:
+ if quantize_photons:
+ if make_int_strip_nan:
+ images_output = np.round(images_input).astype("int")
+ else:
images_output = np.round(images_input)
- else:
+ else:
images_output = images_input
return images_output, energy, ADU_930, ADUpPH
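
Reviewer note: the conversion arithmetic is easy to sanity-check at 930 eV, where `ADUpPH = round(30 * 930 / 930, -1) = 30`:

```python
import numpy as np
from csxtools.helpers import convert_photons

adu = np.array([[62.0, 149.0], [0.0, 31.0]])
photons, energy, adu930, adupph = convert_photons(adu, energy=930.0)
# adupph == 30.0, so 62 ADU -> 2 photons, 149 -> 5, 31 -> 1 (rounded to ints)
```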
diff --git a/csxtools/helpers/overscan.py b/csxtools/helpers/overscan.py
index 43f1ab6..1cfbed0 100644
--- a/csxtools/helpers/overscan.py
+++ b/csxtools/helpers/overscan.py
@@ -1,24 +1,27 @@
import numpy as np
+
def _extract_from_fccdwithOS_osdata(images, os_cols, data_cols):
-    if len(images.shape) !=4:
-        print(f'Input images should be 4D.')
-        raise
+    if len(images.shape) != 4:
+        raise ValueError("Input images should be 4D.")
- #print(images.shape)
+ # print(images.shape)
points, frames, total_cols, horz_pix = images.shape
- super_cols = int(total_cols / (os_cols+data_cols))
- os_cols_data = np.zeros((os_cols, points, frames, super_cols, horz_pix), )
-
- #print(f'{os_cols_data.shape=}')
-
+ super_cols = int(total_cols / (os_cols + data_cols))
+ os_cols_data = np.zeros(
+ (os_cols, points, frames, super_cols, horz_pix),
+ )
+
+ # print(f'{os_cols_data.shape=}')
for i in range(os_cols):
- #print(i)
- #print(f'\t{os_cols+data_cols}')
- os_cols_data[i] = images[:, :, i::os_cols+data_cols, :]
-
+ # print(i)
+ # print(f'\t{os_cols+data_cols}')
+ os_cols_data[i] = images[:, :, i :: os_cols + data_cols, :]
+
return os_cols_data
+
# def extract_from_fccdwithOS_photondata(images, os_cols, data_cols):
# if len(images.shape) !=4:
# print(f'Input images should be 4D.')
@@ -29,116 +32,134 @@ def _extract_from_fccdwithOS_osdata(images, os_cols, data_cols):
# for i in range(data_cols):
# data_cols_data[i] = ar_images[:, :, i+os_cols::os_cols+data_cols, :]
-
+
# return data_cols_data
-def _make_os_correction_data(os_data, os_cols, data_cols, images_data_shape, ):
- #print(f'{os_data.shape=}')
-    if len(images_data_shape) !=4 and len(os_data.shape) != 4:
-        print(f'Input images should be 4D.')
-        raise
+
+def _make_os_correction_data(
+ os_data,
+ os_cols,
+ data_cols,
+ images_data_shape,
+):
+ # print(f'{os_data.shape=}')
+    # raise if either input is not 4D
+    if len(images_data_shape) != 4 or len(os_data.shape) != 4:
+        raise ValueError("Input images should be 4D.")
points, frames, total_cols, horz_pix = images_data_shape
- super_cols = int(total_cols / (os_cols+data_cols))
+ super_cols = int(total_cols / (os_cols + data_cols))
vert_pix = super_cols * data_cols
-
- os_data_for_broadcast = np.zeros((points, frames, vert_pix , horz_pix ))
- #print(f'{os_data_for_broadcast.shape=}')
+
+ os_data_for_broadcast = np.zeros((points, frames, vert_pix, horz_pix))
+ # print(f'{os_data_for_broadcast.shape=}')
for i in range(super_cols):
- #print(i)
- temp = os_data[:,:,i, :].reshape(points, frames, 1, horz_pix)
+ # print(i)
+ temp = os_data[:, :, i, :].reshape(points, frames, 1, horz_pix)
os_supercol_data = np.broadcast_to(temp, (points, frames, data_cols, horz_pix))
- #print(f'\t{os_supercol_data=}')
- #print(f'\t{os_supercol_data.shape=}')
- start, stop = i*(data_cols), data_cols*(i+1)
- #print(f'\t{start} : {stop}')
- os_data_for_broadcast[:,:, start : stop , :] = os_supercol_data
-
+ # print(f'\t{os_supercol_data=}')
+ # print(f'\t{os_supercol_data.shape=}')
+ start, stop = i * (data_cols), data_cols * (i + 1)
+ # print(f'\t{start} : {stop}')
+ os_data_for_broadcast[:, :, start:stop, :] = os_supercol_data
+
return os_data_for_broadcast
-
+
+
def _drop_os_data(images, os_cols, data_cols):
-    if len(images.shape) !=4:
-        print(f'Input images should be 4D.')
-        raise
+    if len(images.shape) != 4:
+        raise ValueError("Input images should be 4D.")
points, frames, total_cols, horz_pix = images.shape
- super_cols = int(total_cols / (os_cols+data_cols))
+ super_cols = int(total_cols / (os_cols + data_cols))
vert_pix = super_cols * data_cols
- images_no_os = np.zeros(( points, frames, vert_pix, horz_pix) )
- #print(f'{images_no_os.shape=}')
-
+ images_no_os = np.zeros((points, frames, vert_pix, horz_pix))
+ # print(f'{images_no_os.shape=}')
+
for i in range(super_cols):
- #print(i)
- start_extract, stop_extract = i*(data_cols+os_cols)+os_cols, (data_cols+os_cols)*(i+1)#+os_cols
- #print(f'\tOUT OF {start_extract}:{stop_extract}')
- temp = images[:,:,start_extract:stop_extract, :]
- #print(f'\t{temp.shape}')
- start_in, stop_in = i*data_cols, i*data_cols+data_cols
- #print(f'\tINTO {start_in}:{stop_in}')
- #target = images_no_os[:,:, start_in : stop_in , :]
- #print(f'\t{target.shape}')
- images_no_os[:,:, start_in : stop_in , :] = temp
-
- #print(f'{images_no_os.shape=}')
-
+ # print(i)
+ start_extract, stop_extract = i * (data_cols + os_cols) + os_cols, (
+ data_cols + os_cols
+ ) * (
+ i + 1
+ ) # +os_cols
+ # print(f'\tOUT OF {start_extract}:{stop_extract}')
+ temp = images[:, :, start_extract:stop_extract, :]
+ # print(f'\t{temp.shape}')
+ start_in, stop_in = i * data_cols, i * data_cols + data_cols
+ # print(f'\tINTO {start_in}:{stop_in}')
+ # target = images_no_os[:,:, start_in : stop_in , :]
+ # print(f'\t{target.shape}')
+ images_no_os[:, :, start_in:stop_in, :] = temp
+
+ # print(f'{images_no_os.shape=}')
+
return images_no_os
+
def _make_left_right(images):
horz_pix = images.shape[-1]
- imgs_left = np.flip(np.copy(images[:,:,:,0:int(horz_pix/2)]))
- imgs_right = np.copy(images[:,:,:,int(horz_pix/2):horz_pix])
-
+ imgs_left = np.flip(np.copy(images[:, :, :, 0 : int(horz_pix / 2)]))
+ imgs_right = np.copy(images[:, :, :, int(horz_pix / 2) : horz_pix])
+
return imgs_left, imgs_right
-#def _make_whole_from_left_right(images_left, images_right):
+
+# def _make_whole_from_left_right(images_left, images_right):
# images = np.concatenate((np.flip(images_left), images_right), axis=-1)
-
-def get_os_correction_images(images, os_cols=2, data_cols=10, os_mean=True, os_single_col=None):
- if os_mean == 'False' and os_single_col is None:
- print('select nth column if not using mean')
- raise
-
- images_left, images_right = _make_left_right(images)
- #print(images_left.shape, images_right.shape)
-
+def get_os_correction_images(
+ images, os_cols=2, data_cols=10, os_mean=True, os_single_col=None
+):
+
+ if os_mean == "False" and os_single_col is None:
+ print("select nth column if not using mean")
+ raise ValueError("Must provide os_single_col if os_mean is False")
+
+ images_left, images_right = _make_left_right(images)
+ # print(images_left.shape, images_right.shape)
+
os_extract_left = _extract_from_fccdwithOS_osdata(images_left, os_cols, data_cols)
-    os_extract_right = _extract_from_fccdwithOS_osdata(images_left, os_cols, data_cols)
+    os_extract_right = _extract_from_fccdwithOS_osdata(images_right, os_cols, data_cols)
-
- #print(os_extract_left.shape, os_extract_right.shape)
+
+ # print(os_extract_left.shape, os_extract_right.shape)
if os_mean:
- os_imgs_left = _make_os_correction_data(np.mean(os_extract_left, axis=0),
- os_cols, data_cols, images_left.shape )
- os_imgs_right = _make_os_correction_data(np.mean(os_extract_right, axis=0),
- os_cols, data_cols, images_right.shape )
+ os_imgs_left = _make_os_correction_data(
+ np.mean(os_extract_left, axis=0), os_cols, data_cols, images_left.shape
+ )
+ os_imgs_right = _make_os_correction_data(
+ np.mean(os_extract_right, axis=0), os_cols, data_cols, images_right.shape
+ )
else:
- os_imgs_left = _make_os_correction_data(os_extract_left[os_single_col],
- os_cols, data_cols, images_left.shape )
- os_single_col = int(not os_single_col )#preserving readout order, not location in flipped array
- os_imgs_right = _make_os_correction_data(s_extract_right[os_single_col ],
- os_cols, data_cols, images_right.shape )
-
- #print(os_imgs_left.shape, os_imgs_right.shape)
+ os_imgs_left = _make_os_correction_data(
+ os_extract_left[os_single_col], os_cols, data_cols, images_left.shape
+ )
+ os_single_col = int(
+ not os_single_col
+ ) # preserving readout order, not location in flipped array
+ os_imgs_right = _make_os_correction_data(
+ os_extract_right[os_single_col], os_cols, data_cols, images_right.shape
+ )
+
+ # print(os_imgs_left.shape, os_imgs_right.shape)
os_imgs = np.concatenate((np.flip(os_imgs_left), os_imgs_right), axis=-1)
-
- #print(os_imgs.shape)
-
+
+ # print(os_imgs.shape)
+
return os_imgs
-
-
+
+
def get_os_dropped_images(images, os_cols=2, data_cols=10):
- imgs_left, imgs_right = _make_left_right(images)
-
+ imgs_left, imgs_right = _make_left_right(images)
+
imgs_left_no_os = _drop_os_data(imgs_left, os_cols, data_cols)
imgs_right_no_os = _drop_os_data(imgs_right, os_cols, data_cols)
-
- #print(f'{imgs_left_no_os.shape=}')
-
+
+ # print(f'{imgs_left_no_os.shape=}')
+
images = np.concatenate((np.flip(imgs_left_no_os), imgs_right_no_os), axis=-1)
- #images = _make_whole_from_left_right(imgs_left_no_os, imgs_right_no_os)
- #print(f'{images.shape=}')
-
+ # images = _make_whole_from_left_right(imgs_left_no_os, imgs_right_no_os)
+ # print(f'{images.shape=}')
+
return images
-
-
-
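
Reviewer note: the geometry these helpers assume is 2 overscan columns per 10 data columns on each readout half, so 96 supercolumns x 12 = 1152 columns reduce to 960. A shape check on synthetic data:

```python
import numpy as np
from csxtools.helpers.overscan import get_os_dropped_images

imgs = np.zeros((1, 2, 1152, 960))  # (points, frames, columns, horizontal pixels)
print(get_os_dropped_images(imgs).shape)  # -> (1, 2, 960, 960)
```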
diff --git a/csxtools/image/__init__.py b/csxtools/image/__init__.py
index a7d76f6..873e098 100644
--- a/csxtools/image/__init__.py
+++ b/csxtools/image/__init__.py
@@ -1,11 +1,27 @@
from .transform import rotate90
-from .stack import (stackmean, stacksum, stackvar, stackstderr, stackstd,
- images_mean, images_sum)
+from .stack import (
+ stackmean,
+ stacksum,
+ stackvar,
+ stackstderr,
+ stackstd,
+ images_mean,
+ images_sum,
+)
-__all__ = ['rotate90', 'stackmean', 'stacksum', 'stackvar', 'stackstderr',
- 'stackstd', 'images_mean', 'images_sum']
+__all__ = [
+ "rotate90",
+ "stackmean",
+ "stacksum",
+ "stackvar",
+ "stackstderr",
+ "stackstd",
+ "images_mean",
+ "images_sum",
+]
# set version string using versioneer
from .._version import get_versions
-__version__ = get_versions()['version']
+
+__version__ = get_versions()["version"]
del get_versions
diff --git a/csxtools/image/stack.py b/csxtools/image/stack.py
index 050c551..7e4b87c 100644
--- a/csxtools/image/stack.py
+++ b/csxtools/image/stack.py
@@ -2,6 +2,7 @@
from ..ext import image as extimage
import logging
+
logger = logging.getLogger(__name__)
diff --git a/csxtools/image/transform.py b/csxtools/image/transform.py
index 52ae75c..0751688 100644
--- a/csxtools/image/transform.py
+++ b/csxtools/image/transform.py
@@ -1,7 +1,7 @@
from ..ext import image as extimage
-def rotate90(a, sense='ccw'):
+def rotate90(a, sense="ccw"):
"""Rotate a stack of images by 90 degrees
This routine rotates a stack of images by 90. The rotation is performed
@@ -22,9 +22,9 @@ def rotate90(a, sense='ccw'):
"""
- if sense == 'ccw':
+ if sense == "ccw":
sense = 1
- elif sense == 'cw':
+ elif sense == "cw":
sense = 0
else:
raise ValueError("sense must be 'cw' or 'ccw'")
diff --git a/csxtools/image_corr.py b/csxtools/image_corr.py
index de4efae..0058609 100644
--- a/csxtools/image_corr.py
+++ b/csxtools/image_corr.py
@@ -7,37 +7,42 @@
def correct_events(evs, data_key, dark_images, drop_raw=False):
- out_data_key = data_key + '_corrected'
+ out_data_key = data_key + "_corrected"
ev0 = next(evs)
- new_desc = dict(ev0['descriptor'])
- new_desc['data_keys'][out_data_key] = dict(new_desc['data_keys'][data_key])
- new_desc['data_keys'][out_data_key]['source'] = 'subtract_background'
- new_desc['uid'] = uuid.uuid4()
+ new_desc = dict(ev0["descriptor"])
+ new_desc["data_keys"][out_data_key] = dict(new_desc["data_keys"][data_key])
+ new_desc["data_keys"][out_data_key]["source"] = "subtract_background"
+ new_desc["uid"] = uuid.uuid4()
if drop_raw:
- new_desc['data_keys'].pop(data_key)
- for ev in chain((ev0, ), evs):
- new_ev = {'uid': str(uuid.uuid4()),
- 'time': ttime.time(),
- 'descriptor': new_desc,
- 'seq_no': ev['seq_no'],
- 'data': dict(ev['data']),
- 'timestamps': dict(ev['timestamps'])}
- corr, gain_img = subtract_background(ev['data'][data_key], dark_images) # noqa F821 TODO
- new_ev['data'][out_data_key] = corr
- new_ev['timestamps'][out_data_key] = ttime.time()
+ new_desc["data_keys"].pop(data_key)
+ for ev in chain((ev0,), evs):
+ new_ev = {
+ "uid": str(uuid.uuid4()),
+ "time": ttime.time(),
+ "descriptor": new_desc,
+ "seq_no": ev["seq_no"],
+ "data": dict(ev["data"]),
+ "timestamps": dict(ev["timestamps"]),
+ }
+ # TODO (nisarnk): replace stub with actual subtract_background implementation
+ corr, gain_img = subtract_background( # noqa F821
+ ev["data"][data_key], dark_images
+ )
+ new_ev["data"][out_data_key] = corr
+ new_ev["timestamps"][out_data_key] = ttime.time()
if drop_raw:
- new_ev['data'].pop(data_key)
- new_ev['timestamps'].pop(data_key)
+ new_ev["data"].pop(data_key)
+ new_ev["timestamps"].pop(data_key)
yield new_ev
def clean_images(header, pivot_key, timesource_key, dark_images=None, static_keys=None):
if static_keys is None:
- static_keys = ['sx', 'sy', 'temp_a', 'temp_b', 'sz']
+ static_keys = ["sx", "sy", "temp_a", "temp_b", "sz"]
# sort out which descriptor has the key we want to pivot on
- pv_desc = [d for d in header['descriptors'] if pivot_key in d['data_keys']][0]
+ pv_desc = [d for d in header["descriptors"] if pivot_key in d["data_keys"]][0]
# sort out which descriptor has the key that we want to zip with to get time stamps
- ts_desc = [d for d in header['descriptors'] if timesource_key in d['data_keys']][0]
+ ts_desc = [d for d in header["descriptors"] if timesource_key in d["data_keys"]][0]
ts_events = get_events_generator(ts_desc)
pv_events = get_events_generator(pv_desc)
@@ -53,9 +58,9 @@ def clean_images(header, pivot_key, timesource_key, dark_images=None, static_key
def extract_darkfield(header, dark_key):
- cam_desc = [d for d in header['descriptors'] if dark_key in d['data_keys']][0]
+ cam_desc = [d for d in header["descriptors"] if dark_key in d["data_keys"]][0]
events = get_events_generator(cam_desc)
events = list(((ev, fill_event(ev))[0] for ev in events))
event = events[0]
- ims = (event['data'][dark_key] << 2) >> 2
+ ims = (event["data"][dark_key] << 2) >> 2
return ims.mean(axis=0)
diff --git a/csxtools/ipynb/__init__.py b/csxtools/ipynb/__init__.py
index 2e07fc5..2b3a880 100644
--- a/csxtools/ipynb/__init__.py
+++ b/csxtools/ipynb/__init__.py
@@ -3,8 +3,8 @@
# set version string using versioneer
from .._version import get_versions
-__version__ = get_versions()['version']
+
+__version__ = get_versions()["version"]
del get_versions
-__all__ = ['image_stack_to_movie', 'show_image_stack',
- 'notebook_to_nbviewer']
+__all__ = ["image_stack_to_movie", "show_image_stack", "notebook_to_nbviewer"]
diff --git a/csxtools/ipynb/animation.py b/csxtools/ipynb/animation.py
index fc34ceb..32e9ee4 100644
--- a/csxtools/ipynb/animation.py
+++ b/csxtools/ipynb/animation.py
@@ -6,8 +6,14 @@
import base64
-def show_image_stack(images, minmax, fontsize=18, cmap='CMRmap',
- zlabel=r'Intensty [ADU]', figsize=(12, 10)):
+def show_image_stack(
+ images,
+ minmax,
+ fontsize=18,
+ cmap="CMRmap",
+ zlabel=r"Intensty [ADU]",
+ figsize=(12, 10),
+):
"""Show an Interactive Image Stack in an IPython Notebook
Parameters
@@ -35,28 +41,34 @@ def view_frame(i, vmin, vmax):
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
- im = ax.imshow(images[i], cmap=cmap, interpolation='none',
- vmin=vmin, vmax=vmax)
+ im = ax.imshow(images[i], cmap=cmap, interpolation="none", vmin=vmin, vmax=vmax)
cbar = fig.colorbar(im)
cbar.ax.tick_params(labelsize=fontsize)
- cbar.set_label(zlabel, size=fontsize, weight='bold')
-
- ax.set_title('Frame {} Min = {} Max = {}'.format(i, vmin, vmax),
- fontsize=fontsize, fontweight='bold')
-
- for item in ([ax.xaxis.label, ax.yaxis.label] +
- ax.get_xticklabels() + ax.get_yticklabels()):
+ cbar.set_label(zlabel, size=fontsize, weight="bold")
+
+ ax.set_title(
+ "Frame {} Min = {} Max = {}".format(i, vmin, vmax),
+ fontsize=fontsize,
+ fontweight="bold",
+ )
+
+ for item in (
+ [ax.xaxis.label, ax.yaxis.label]
+ + ax.get_xticklabels()
+ + ax.get_yticklabels()
+ ):
item.set_fontsize(fontsize)
- item.set_fontweight('bold')
+ item.set_fontweight("bold")
plt.show()
- interact(view_frame, i=(0, n-1), vmin=minmax, vmax=minmax)
+ interact(view_frame, i=(0, n - 1), vmin=minmax, vmax=minmax)
-def image_stack_to_movie(images, frames=None, vmin=None, vmax=None,
- figsize=(6, 5), cmap='CMRmap', fps=10):
+def image_stack_to_movie(
+ images, frames=None, vmin=None, vmax=None, figsize=(6, 5), cmap="CMRmap", fps=10
+):
"""Convert image stack to movie and show in notebook.
Parameters
@@ -82,23 +94,25 @@ def image_stack_to_movie(images, frames=None, vmin=None, vmax=None,
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
- im = plt.imshow(images[1], vmin=vmin, vmax=vmax, cmap=cmap,
- interpolation='none')
+ im = plt.imshow(images[1], vmin=vmin, vmax=vmax, cmap=cmap, interpolation="none")
cbar = fig.colorbar(im)
cbar.ax.tick_params(labelsize=14)
- cbar.set_label(r"Intensity [ADU]", size=14,)
- for item in ([ax.xaxis.label, ax.yaxis.label] +
- ax.get_xticklabels() + ax.get_yticklabels()):
+ cbar.set_label(
+ r"Intensity [ADU]",
+ size=14,
+ )
+ for item in (
+ [ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()
+ ):
item.set_fontsize(14)
- item.set_fontweight('bold')
+ item.set_fontweight("bold")
def animate(i):
im.set_array(images[i])
- ax.set_title('Frame {}'.format(i), fontsize=16, fontweight='bold')
- return im,
+ ax.set_title("Frame {}".format(i), fontsize=16, fontweight="bold")
+ return (im,)
- anim = animation.FuncAnimation(fig, animate, frames=frames,
- interval=1, blit=True)
+ anim = animation.FuncAnimation(fig, animate, frames=frames, interval=1, blit=True)
plt.close(anim._fig)
# return anim.to_html5_video()
return HTML(_anim_to_html(anim, fps))
@@ -110,11 +124,13 @@ def _anim_to_html(anim, fps):
Your browser does not support the video tag.
"""
- if not hasattr(anim, '_encoded_video'):
- with NamedTemporaryFile(suffix='.mp4') as f:
- anim.save(f.name, fps=fps,
- extra_args=['-vcodec', 'libx264',
- '-pix_fmt', 'yuv420p'])
+ if not hasattr(anim, "_encoded_video"):
+ with NamedTemporaryFile(suffix=".mp4") as f:
+ anim.save(
+ f.name,
+ fps=fps,
+ extra_args=["-vcodec", "libx264", "-pix_fmt", "yuv420p"],
+ )
video = open(f.name, "rb").read()
anim._encoded_video = base64.b64encode(video)
return VIDEO_TAG.format(anim._encoded_video.decode("utf-8"))
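For reference, `_anim_to_html` follows the standard matplotlib embed pattern: render the `FuncAnimation` to a temporary MP4 with ffmpeg, base64-encode the bytes, and splice them into a `<video>` tag. A self-contained sketch of the same pattern (assumes ffmpeg is installed; the random stack is purely illustrative):

    import base64
    from tempfile import NamedTemporaryFile
    import numpy as np
    from matplotlib import animation, pyplot as plt

    images = np.random.rand(10, 32, 32)  # hypothetical image stack
    fig, ax = plt.subplots()
    im = ax.imshow(images[0], interpolation="none")

    def animate(i):
        im.set_array(images[i])
        return (im,)

    anim = animation.FuncAnimation(fig, animate, frames=len(images), blit=True)
    with NamedTemporaryFile(suffix=".mp4") as f:
        anim.save(f.name, fps=10, extra_args=["-vcodec", "libx264", "-pix_fmt", "yuv420p"])
        encoded = base64.b64encode(open(f.name, "rb").read())  # ready for a <video> tag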
diff --git a/csxtools/ipynb/nbviewer.py b/csxtools/ipynb/nbviewer.py
index 0bfc33d..3fe811f 100644
--- a/csxtools/ipynb/nbviewer.py
+++ b/csxtools/ipynb/nbviewer.py
@@ -32,6 +32,6 @@ def notebook_to_nbviewer():
js = _js_callback_open + _js
html = ''
- html += 'nbviewer will open in a new tab in 20 seconds .....'
+ html += ""
+ html += "nbviewer will open in a new tab in 20 seconds ....."
return display(HTML(html))
diff --git a/csxtools/plotting.py b/csxtools/plotting.py
index 83bd09e..88b8bb1 100644
--- a/csxtools/plotting.py
+++ b/csxtools/plotting.py
@@ -1,12 +1,12 @@
import numpy as np
from matplotlib import pyplot as plt
-golden_mean = (np.sqrt(5)-1.0)/2.0
+golden_mean = (np.sqrt(5) - 1.0) / 2.0
-def make_panel_plot(n, fig=None,
- xlmargin=0.15, ytmargin=0.10,
- xrmargin=0.05, ybmargin=0.10):
+def make_panel_plot(
+ n, fig=None, xlmargin=0.15, ytmargin=0.10, xrmargin=0.05, ybmargin=0.10
+):
"""Make a multi panel plot using matplotlib
This function, makes a typical panel plot and returns a list
@@ -33,8 +33,8 @@ def make_panel_plot(n, fig=None,
if fig is None:
fig = plt.figure(figsize=[6, 6 * golden_mean * n])
- xsize = (1. - (xlmargin + xrmargin))
- ysize = (1. - (ybmargin + ytmargin)) / n
+ xsize = 1.0 - (xlmargin + xrmargin)
+ ysize = (1.0 - (ybmargin + ytmargin)) / n
pos = np.array([xlmargin, ybmargin, xsize, ysize])
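The panel geometry here is simple: every panel shares one width, the vertical span left after the top and bottom margins is split evenly among `n` panels, and the figure height scales as `golden_mean * n`. A quick sketch of the resulting `[left, bottom, width, height]` rectangles under a plausible reading of the layout (panels presumably offset upward by `i * ysize`), using the function's defaults:

    import numpy as np

    golden_mean = (np.sqrt(5) - 1.0) / 2.0
    n, xlmargin, xrmargin, ybmargin, ytmargin = 3, 0.15, 0.05, 0.10, 0.10

    xsize = 1.0 - (xlmargin + xrmargin)        # shared panel width
    ysize = (1.0 - (ybmargin + ytmargin)) / n  # equal share of the vertical span
    for i in range(n):
        # Panels stack upward from the bottom margin in figure coordinates.
        print([xlmargin, ybmargin + i * ysize, xsize, ysize])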
diff --git a/csxtools/settings.py b/csxtools/settings.py
index 3ae520e..cdd9c41 100644
--- a/csxtools/settings.py
+++ b/csxtools/settings.py
@@ -1,3 +1,8 @@
detectors = {}
-detectors['fccd'] = 'fccd_image'
-diff_angles = ['delta', 'theta', 'gamma', None, None, None]
+detectors["fccd"] = "fccd_image"
+detectors["axis1"] = "axis1_image"
+detectors["axis"] = "axis_image"
+detectors["axis_standard"] = "axis_standard_image"
+detectors["axis_cont"] = "axis_cont_image"
+
+diff_angles = ["delta", "theta", "gamma", None, None, None]
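These keys are how the high-level helpers resolve a default data tag when the caller does not pass one; for example, `get_fastccd_images` falls back to `detectors["fccd"]`. In sketch form:

    from csxtools.settings import detectors

    tag = None  # e.g. a keyword argument left at its default
    if tag is None:
        tag = detectors["fccd"]  # -> "fccd_image"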
diff --git a/csxtools/utils.py b/csxtools/utils.py
index 2749568..38795a8 100644
--- a/csxtools/utils.py
+++ b/csxtools/utils.py
@@ -2,16 +2,19 @@
import time as ttime
from .fastccd import correct_images
+from .axis1 import correct_images_axis
from .image import rotate90, stackmean
from .settings import detectors
from databroker.assets.handlers import AreaDetectorHDF5TimestampHandler
import logging
+
logger = logging.getLogger(__name__)
-def get_fastccd_images(light_header, dark_headers=None,
- flat=None, gain=(1, 4, 8), tag=None, roi=None):
+def get_fastccd_images(
+ light_header, dark_headers=None, flat=None, gain=(1, 4, 8), tag=None, roi=None
+):
"""Retreive and correct FastCCD Images from associated headers
Retrieve FastCCD Images from databroker and correct for:
@@ -57,7 +60,7 @@ def get_fastccd_images(light_header, dark_headers=None,
"""
if tag is None:
- tag = detectors['fccd']
+ tag = detectors["fccd"]
# Now lets sort out the ROI
if roi is not None:
@@ -72,8 +75,9 @@ def get_fastccd_images(light_header, dark_headers=None,
logger.warning("Processing without dark images")
else:
if dark_headers[0] is None:
- raise NotImplementedError("Use of header metadata to find dark"
- " images is not implemented yet.")
+            raise NotImplementedError(
+                "Use of header metadata to find dark images is not implemented yet."
+            )
# Read the images for the dark headers
t = ttime.time()
@@ -91,25 +95,20 @@ def get_fastccd_images(light_header, dark_headers=None,
tt = ttime.time()
b = bgnd_events.astype(dtype=np.uint16)
- logger.info("Image conversion took %.3f seconds",
- ttime.time() - tt)
+ logger.info("Image conversion took %.3f seconds", ttime.time() - tt)
b = correct_images(b, gain=(1, 1, 1))
tt = ttime.time()
b = stackmean(b)
- logger.info("Mean of image stack took %.3f seconds",
- ttime.time() - tt)
+ logger.info("Mean of image stack took %.3f seconds", ttime.time() - tt)
else:
- if (i == 0):
- logger.warning("Missing dark image"
- " for gain setting 8")
- elif (i == 1):
- logger.warning("Missing dark image"
- " for gain setting 2")
- elif (i == 2):
- logger.warning("Missing dark image"
- " for gain setting 1")
+                if i == 0:
+                    logger.warning("Missing dark image for gain setting 8")
+                elif i == 1:
+                    logger.warning("Missing dark image for gain setting 2")
+                elif i == 2:
+                    logger.warning("Missing dark image for gain setting 1")
dark.append(b)
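The dark-image branch above reduces each dark run to a single background frame: cast the raw events to uint16, decode them with unity gains (so the words are decoded but no gain scaling is applied), then average over the frame axis. A sketch with a made-up stack:

    import numpy as np
    from csxtools.fastccd import correct_images
    from csxtools.image import stackmean

    # Hypothetical raw dark stack: (frames, rows, cols) of 16-bit words.
    bgnd_events = np.random.randint(0, 2**13, (50, 96, 96)).astype(np.uint16)

    b = bgnd_events.astype(np.uint16)
    b = correct_images(b, gain=(1, 1, 1))  # unity gains: decode without scaling
    b = stackmean(b)                       # one (rows, cols) background frame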
@@ -128,6 +127,94 @@ def get_fastccd_images(light_header, dark_headers=None,
return _correct_fccd_images(events, bgnd, flat, gain)
+def get_axis_images(light_header, dark_header=None, flat=None, tag=None, roi=None):
+ """Retreive and correct AXIS Images from associated headers
+
+ Retrieve AXIS Images from databroker and correct for:
+
+ - Bad Pixels (converted to ``np.nan``)
+ - Backgorund.
+ - Flatfield correction.
+ - Rotation (returned images are rotated 90 deg cw)
+
+ Parameters
+ ----------
+    light_header : databroker header
+        This header defines the images to convert
+
+    dark_header : databroker header, optional
+        The header containing the dark images.
+
+ flat : array_like
+ Array to use for the flatfield correction. This should be a 2D
+ array sized as the last two dimensions of the image stack.
+
+
+    tag : string
+        Data tag used to retrieve images. Used in the call to
+        ``databroker.get_images()``. Unlike the FastCCD helpers there is
+        no default; a ``ValueError`` is raised if `tag` is `None`.
+
+ roi : tuple
+ coordinates of the upper-left corner and width and height of
+ the ROI: e.g., (x, y, w, h)
+
+ Returns
+ -------
+ dask.array : corrected images
+
+ """
+ flipped_image = _get_axis1_images(light_header, dark_header, flat, tag, roi)
+ return flipped_image[..., ::-1]
+
+
+def _get_axis1_images(light_header, dark_header=None, flat=None, tag=None, roi=None):
+
+ if tag is None:
+ logger.error("Must pass 'tag' argument to get_axis_images()")
+ raise ValueError("Must pass 'tag' argument")
+
+ # Now lets sort out the ROI
+ if roi is not None:
+ roi = list(roi)
+ # Convert ROI to start:stop from start:size
+ roi[2] = roi[0] + roi[2]
+ roi[3] = roi[1] + roi[3]
+ logger.info("Computing with ROI of %s", str(roi))
+
+ if dark_header is None:
+ bgnd = None
+ logger.warning("Processing without dark images")
+ else:
+
+ # Read the images for the dark headers
+ t = ttime.time()
+
+ d = dark_header
+ bgnd_events = _get_images(d, tag, roi)
+
+ tt = ttime.time()
+ b = bgnd_events.astype(dtype=np.uint16)
+ logger.info("Image conversion took %.3f seconds", ttime.time() - tt)
+ tt = ttime.time()
+ b = stackmean(b)
+ logger.info("Mean of image stack took %.3f seconds", ttime.time() - tt)
+
+ bgnd = np.array(b)
+
+ logger.info("Computed dark images in %.3f seconds", ttime.time() - t)
+
+ events = _get_images(light_header, tag, roi)
+
+    # Ok, so let's return a pims pipeline which does the image conversion
+
+ # Crop Flatfield image
+ if flat is not None and roi is not None:
+ flat = _crop(flat, roi)
+
+ return _correct_axis_images(events, bgnd, flat)
+
+
def get_images_to_4D(images, dtype=None):
"""Convert image stack to 4D numpy array
@@ -147,8 +234,7 @@ def get_images_to_4D(images, dtype=None):
>>> a = get_images_to_4D(images, dtype=np.float32)
"""
- im = np.array([np.asarray(im, dtype=dtype) for im in images],
- dtype=dtype)
+ im = np.array([np.asarray(im, dtype=dtype) for im in images], dtype=dtype)
return im
@@ -185,7 +271,15 @@ def _get_images(header, tag, roi=None):
def _correct_fccd_images(image, bgnd, flat, gain):
image = correct_images(image, bgnd, flat, gain)
- image = rotate90(image, 'cw')
+ image = rotate90(image, "cw")
+ return image
+
+
+def _correct_axis_images(image, bgnd, flat):
+ """
+    Apply background and flatfield corrections via ``correct_images_axis``;
+    the 90-degree rotation is performed inside the C extension itself.
+ """
+ image = correct_images_axis(image, bgnd, flat)
return image
@@ -196,11 +290,11 @@ def _crop_images(image, roi):
def _crop(image, roi):
image_shape = image.shape
# Assuming ROI is specified in the "rotated" (correct) orientation
- roi = [image_shape[-2]-roi[3], roi[0], image_shape[-1]-roi[1], roi[2]]
- return image.T[roi[1]:roi[3], roi[0]:roi[2]].T
+ roi = [image_shape[-2] - roi[3], roi[0], image_shape[-1] - roi[1], roi[2]]
+ return image.T[roi[1] : roi[3], roi[0] : roi[2]].T
-def get_fastccd_timestamps(header, tag='fccd_image'):
+def get_fastccd_timestamps(header, tag="fccd_image"):
"""Return the FastCCD timestamps from the Areadetector Data File
Return a list of numpy arrays of the timestamps for the images as
@@ -218,13 +312,36 @@ def get_fastccd_timestamps(header, tag='fccd_image'):
list of arrays of the timestamps
"""
- with header.db.reg.handler_context(
- {'AD_HDF5': AreaDetectorHDF5TimestampHandler}):
+ with header.db.reg.handler_context({"AD_HDF5": AreaDetectorHDF5TimestampHandler}):
timestamps = list(header.data(tag))
return timestamps
+def get_axis_timestamps(header, tag="axis1_hdf5_time_stamp"):
+ """Return the AXIS timestamps from the Areadetector Data File
+
+ Return a list of numpy arrays of the timestamps for the images as
+ recorded in the datafile.
+
+ Parameters
+ ----------
+    header : databroker header
+        This header defines the run
+    tag : string
+        This is the tag or name of the AXIS detector timestamp field.
+
+ Returns
+ -------
+ list of arrays of the timestamps
+
+ """
+
+ timestamps = list(header.data(tag))
+
+ return timestamps
+
+
def calculate_flatfield(image, limits=(0.6, 1.4)):
"""Calculate a flatfield from fluo data
@@ -259,9 +376,10 @@ def calculate_flatfield(image, limits=(0.6, 1.4)):
return flat
-
-def get_fastccd_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interval=False):
- """Calculate a flatfield from two headers
+def get_fastccd_flatfield(
+ light, dark, flat=None, limits=(0.6, 1.4), half_interval=False
+):
+ """Calculate a flatfield from two headers
This routine calculates the flatfield using the
:func:calculate_flatfield() function after obtaining the images from
@@ -278,7 +396,7 @@ def get_fastccd_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interv
limits : tuple limits used for returning corrected pixel flatfield
The tuple setting lower and upper bound. np.nan returned value is outside bounds
half_interval : boolean or tuple to perform calculation for only half of the FastCCD
- Default is False. If True, then the hard-code portion is retained. Customize image
+        Default is False. If True, the hard-coded row interval (7, 486) is used. Customize image
manipulation using a tuple of length 2 for (row_start, row_stop).
@@ -291,7 +409,53 @@ def get_fastccd_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interv
images = stackmean(images)
if half_interval:
if isinstance(half_interval, bool):
- row_start, row_stop = (7, 486) #hard coded for the broken half of the fccd
+ row_start, row_stop = (7, 486) # hard coded for the broken half of the fccd
+ else:
+ row_start, row_stop = half_interval
+ print(row_start, row_stop)
+ images[:, row_start:row_stop] = np.nan
+ flat = calculate_flatfield(images, limits)
+ removed = np.sum(np.isnan(flat))
+ if removed != 0:
+ logger.warning(
+ "Flatfield correction removed %d pixels (%.2f %%)"
+ % (removed, removed * 100 / flat.size)
+ )
+ return flat
+
+
+def get_axis_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interval=False, tag=None):
+ """Calculate a flatfield from two headers
+
+ This routine calculates the flatfield using the
+ :func:calculate_flatfield() function after obtaining the images from
+ the headers.
+
+ Parameters
+ ----------
+ light : databroker header
+ The header containing the light images
+ dark : databroker header(s)
+        The header(s) from the run containing the dark images.
+    flat : flatfield image (optional)
+        The array to be used for the initial flatfield
+    limits : tuple limits used for returning corrected pixel flatfield
+        The tuple setting lower and upper bound. np.nan is returned for values outside the bounds
+    half_interval : boolean or tuple to perform calculation for only half of the detector
+        Default is False. If True, the hard-coded row interval (7, 486) is used. Customize image
+        manipulation using a tuple of length 2 for (row_start, row_stop).
+    tag : string
+        Data tag used to retrieve images; required by the underlying AXIS reader.
+
+
+ Returns
+ -------
+ array_like
+        Flatfield correction. The correction is oriented as "raw data", not the final data generated by get_axis_images().
+ """
+    images = get_images_to_3D(_get_axis1_images(light, dark, flat, tag))
+ images = stackmean(images)
+ if half_interval:
+ if isinstance(half_interval, bool):
+            row_start, row_stop = (7, 486)  # hard-coded interval carried over from the FastCCD
else:
row_start, row_stop = half_interval
print(row_start, row_stop)
@@ -299,8 +463,10 @@ def get_fastccd_flatfield(light, dark, flat=None, limits=(0.6, 1.4), half_interv
flat = calculate_flatfield(images, limits)
removed = np.sum(np.isnan(flat))
if removed != 0:
- logger.warning("Flatfield correction removed %d pixels (%.2f %%)" %
- (removed, removed * 100 / flat.size))
+ logger.warning(
+ "Flatfield correction removed %d pixels (%.2f %%)"
+ % (removed, removed * 100 / flat.size)
+ )
return flat
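Taken together, the new AXIS helpers mirror the FastCCD workflow: build a flatfield from a light/dark pair, then apply it when retrieving corrected stacks. A hedged end-to-end sketch, with the `tag` argument threaded through as above (the scan numbers are borrowed from the example notebook and the tag value from settings.py; both are illustrative only):

    from databroker import Broker
    from csxtools.utils import get_axis_flatfield, get_axis_images

    db = Broker.named("csx")
    light, dark = db[202033], db[202032]  # hypothetical scan numbers

    # Unlike the FastCCD helpers, the AXIS reader has no default tag and
    # raises ValueError if one is not supplied.
    flat = get_axis_flatfield(light, dark, limits=(0.9, 1.1), tag="axis_image")
    stack = get_axis_images(light, dark, flat=flat, tag="axis_image",
                            roi=(100, 200, 64, 64))  # (x, y, w, h), converted to start:stop internally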
diff --git a/doc/conf.py b/doc/conf.py
index 2770aa9..3c552e4 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -13,65 +13,65 @@
# All configuration values have a default; values that are commented out
# serve to show the default.
-import sys
-import os
-import shlex
+# import sys
+# import os
+# import shlex
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
- 'sphinx.ext.autodoc',
- 'sphinx.ext.doctest',
- 'sphinx.ext.intersphinx',
- 'sphinx.ext.todo',
- 'sphinx.ext.coverage',
- 'sphinx.ext.mathjax',
- 'sphinx.ext.ifconfig',
- 'sphinx.ext.viewcode',
- 'sphinx.ext.napoleon',
- 'IPython.sphinxext.ipython_console_highlighting',
- 'IPython.sphinxext.ipython_directive',
+ "sphinx.ext.autodoc",
+ "sphinx.ext.doctest",
+ "sphinx.ext.intersphinx",
+ "sphinx.ext.todo",
+ "sphinx.ext.coverage",
+ "sphinx.ext.mathjax",
+ "sphinx.ext.ifconfig",
+ "sphinx.ext.viewcode",
+ "sphinx.ext.napoleon",
+ "IPython.sphinxext.ipython_console_highlighting",
+ "IPython.sphinxext.ipython_directive",
]
# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
-source_suffix = '.rst'
+source_suffix = ".rst"
# The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
# The master toctree document.
-master_doc = 'index'
+master_doc = "index"
# General information about the project.
-project = 'csxtools'
-copyright = '2015, Brookhaven Science Associates, Brookhaven National Laboratory'
-author = 'Brookhaven Science Associates, Brookhaven National Laboratory'
+project = "csxtools"
+copyright = "2015, Brookhaven Science Associates, Brookhaven National Laboratory"
+author = "Brookhaven Science Associates, Brookhaven National Laboratory"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
-version = '0.1'
+version = "0.1"
# The full version, including alpha/beta/rc tags.
-release = '0.1.0'
+release = "0.1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -82,37 +82,37 @@
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
-#today = ''
+# today = ''
# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
-exclude_patterns = ['_build']
+exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
-#default_role = None
+# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
-#show_authors = False
+# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
-#keep_warnings = False
+# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
@@ -122,160 +122,161 @@
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
-html_theme = 'bootstrap'
+html_theme = "bootstrap"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
-html_theme_options = {'source_link_position': "footer",
- 'navbar_sidebarrel': False,
- 'bootstrap_version': "3",
- 'bootswatch_theme': "united"}
+html_theme_options = {
+ "source_link_position": "footer",
+ "navbar_sidebarrel": False,
+ "bootstrap_version": "3",
+ "bootswatch_theme": "united",
+}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# " v documentation".
-#html_title = None
+# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
-#html_logo = None
+# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
-#html_favicon = None
+# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
-#html_extra_path = []
+# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {}
-#html_sidebars = {'**': ['localtoc.html', 'sourcelink.html', 'searchbox.html']}
+# html_sidebars = {'**': ['localtoc.html', 'sourcelink.html', 'searchbox.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
# If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
# If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
# If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
-#html_search_language = 'en'
+# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
-#html_search_options = {'type': 'default'}
+# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
-#html_search_scorer = 'scorer.js'
+# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
-htmlhelp_basename = 'csxtoolsdoc'
+htmlhelp_basename = "csxtoolsdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
-
-# Latex figure (float) alignment
-#'figure_align': 'htbp',
+ # The paper size ('letterpaper' or 'a4paper').
+ # 'papersize': 'letterpaper',
+ # The font size ('10pt', '11pt' or '12pt').
+ # 'pointsize': '10pt',
+ # Additional stuff for the LaTeX preamble.
+ # 'preamble': '',
+ # Latex figure (float) alignment
+ # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
- (master_doc, 'csxtools.tex', 'csxtools Documentation',
- 'Brookhaven Science Associates, Brookhaven National Laboratory', 'manual'),
+ (
+ master_doc,
+ "csxtools.tex",
+ "csxtools Documentation",
+ "Brookhaven Science Associates, Brookhaven National Laboratory",
+ "manual",
+ ),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
-#latex_logo = None
+# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
# If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
# If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
# Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
# If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
-man_pages = [
- (master_doc, 'csxtools', 'csxtools Documentation',
- [author], 1)
-]
+man_pages = [(master_doc, "csxtools", "csxtools Documentation", [author], 1)]
# If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
@@ -284,23 +285,29 @@
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
- (master_doc, 'csxtools', 'csxtools Documentation',
- author, 'csxtools', 'One line description of project.',
- 'Miscellaneous'),
+ (
+ master_doc,
+ "csxtools",
+ "csxtools Documentation",
+ author,
+ "csxtools",
+ "One line description of project.",
+ "Miscellaneous",
+ ),
]
# Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
+# texinfo_appendices = []
# If false, no module index is generated.
-#texinfo_domain_indices = True
+# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
+# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
-#texinfo_no_detailmenu = False
+# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
-intersphinx_mapping = {'https://docs.python.org/': None}
+intersphinx_mapping = {"https://docs.python.org/": None}
diff --git a/examples/Correct_FastCCD_Images.ipynb b/examples/Correct_FastCCD_Images.ipynb
index 6ff7669..8718771 100644
--- a/examples/Correct_FastCCD_Images.ipynb
+++ b/examples/Correct_FastCCD_Images.ipynb
@@ -19,22 +19,216 @@
"Load the ``databroker`` moudle, ``csxtools`` and various other dependencies"
]
},
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "/nsls2/software/common/jupyter/kernel_envs/beta/.pixi/envs/default/lib/python312.zip\n",
+ "/nsls2/software/common/jupyter/kernel_envs/beta/.pixi/envs/default/lib/python3.12\n",
+ "/nsls2/software/common/jupyter/kernel_envs/beta/.pixi/envs/default/lib/python3.12/lib-dynload\n",
+ "\n",
+ "/nsls2/software/common/jupyter/kernel_envs/beta/.pixi/envs/default/lib/python3.12/site-packages\n",
+ "/nsls2/software/common/jupyter/kernel_envs/beta/.pixi/envs/default/lib/python3.12/site-packages/setuptools/_vendor\n"
+ ]
+ }
+ ],
+ "source": [
+ "import sys\n",
+ "for path in sys.path:\n",
+ " print(path)"
+ ]
+ },
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Tiled version 0.1.0b11\n",
+ "INFO:tiled.server.app:Tiled version 0.1.0b11\n"
+ ]
+ }
+ ],
"source": [
"import numpy as np\n",
- "from databroker import DataBroker, get_table\n",
- "from csxtools.utils import get_fastccd_images, get_images_to_4D\n",
+ "#from databroker import DataBroker, get_table\n",
+ "from databroker import Broker\n",
+ "db = Broker.named('csx')\n",
+ "from csxtools.utils import get_fastccd_images, get_images_to_4D, get_fastccd_flatfield\n",
"from csxtools.ipynb import image_stack_to_movie, show_image_stack\n",
"%matplotlib inline\n",
"from matplotlib import pyplot as plt"
]
},
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "\u001b[0;31mSignature:\u001b[0m\n",
+ "\u001b[0mget_fastccd_images\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\u001b[0m\n",
+ "\u001b[0;34m\u001b[0m \u001b[0mlight_header\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
+ "\u001b[0;34m\u001b[0m \u001b[0mdark_headers\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
+ "\u001b[0;34m\u001b[0m \u001b[0mflat\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
+ "\u001b[0;34m\u001b[0m \u001b[0mgain\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m4\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m8\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
+ "\u001b[0;34m\u001b[0m \u001b[0mtag\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
+ "\u001b[0;34m\u001b[0m \u001b[0mroi\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
+ "\u001b[0;34m\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;31mDocstring:\u001b[0m\n",
+ "Retreive and correct FastCCD Images from associated headers\n",
+ "\n",
+ "Retrieve FastCCD Images from databroker and correct for:\n",
+ "\n",
+ "- Bad Pixels (converted to ``np.nan``)\n",
+ "- Backgorund.\n",
+ "- Multigain bits.\n",
+ "- Flatfield correction.\n",
+ "- Rotation (returned images are rotated 90 deg cw)\n",
+ "\n",
+ "Parameters\n",
+ "----------\n",
+ "light_header : databorker header\n",
+ " This header defines the images to convert\n",
+ "\n",
+ "dark_headers : tuple of 3 databroker headers , optional\n",
+ " These headers are the dark images. The tuple should be formed\n",
+ " from the dark image sets for the Gain 8, Gain 2 and Gain 1\n",
+ " (most sensitive to least sensitive) settings. If a set is not\n",
+ " avaliable then ``None`` can be entered.\n",
+ "\n",
+ "flat : array_like\n",
+ " Array to use for the flatfield correction. This should be a 2D\n",
+ " array sized as the last two dimensions of the image stack.\n",
+ "\n",
+ "gain : tuple\n",
+ " Gain multipliers for the 3 gain settings (most sensitive to\n",
+ " least sensitive)\n",
+ "\n",
+ "tag : string\n",
+ " Data tag used to retrieve images. Used in the call to\n",
+ " ``databroker.get_images()``. If `None`, use the defualt from\n",
+ " the settings.\n",
+ "\n",
+ "roi : tuple\n",
+ " coordinates of the upper-left corner and width and height of\n",
+ " the ROI: e.g., (x, y, w, h)\n",
+ "\n",
+ "Returns\n",
+ "-------\n",
+ "dask.array : corrected images\n",
+ "\u001b[0;31mFile:\u001b[0m /nsls2/software/common/jupyter/kernel_envs/beta/.pixi/envs/default/lib/python3.12/site-packages/csxtools/utils.py\n",
+ "\u001b[0;31mType:\u001b[0m function"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "get_fastccd_images?"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "ename": "KeyError",
+ "evalue": "0",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[0;31mKeyError\u001b[0m Traceback (most recent call last)",
+ "Cell \u001b[0;32mIn[5], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m scan_ff \u001b[38;5;241m=\u001b[39m db[\u001b[38;5;241m202033\u001b[39m]\n\u001b[1;32m 2\u001b[0m scan_ff_dark \u001b[38;5;241m=\u001b[39m db[\u001b[38;5;241m202032\u001b[39m]\n\u001b[0;32m----> 3\u001b[0m a\u001b[38;5;241m=\u001b[39m\u001b[43mget_fastccd_flatfield\u001b[49m\u001b[43m(\u001b[49m\u001b[43mscan_ff\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mscan_ff_dark\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mlimits\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m0.9\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m1.1\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n",
+ "File \u001b[0;32m/nsls2/software/common/jupyter/kernel_envs/beta/.pixi/envs/default/lib/python3.12/site-packages/csxtools/utils.py:290\u001b[0m, in \u001b[0;36mget_fastccd_flatfield\u001b[0;34m(light, dark, flat, limits, half_interval)\u001b[0m\n\u001b[1;32m 263\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mget_fastccd_flatfield\u001b[39m(light, dark, flat\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, limits\u001b[38;5;241m=\u001b[39m(\u001b[38;5;241m0.6\u001b[39m, \u001b[38;5;241m1.4\u001b[39m), half_interval\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m):\n\u001b[1;32m 264\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Calculate a flatfield from two headers \u001b[39;00m\n\u001b[1;32m 265\u001b[0m \n\u001b[1;32m 266\u001b[0m \u001b[38;5;124;03m This routine calculates the flatfield using the\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 288\u001b[0m \u001b[38;5;124;03m Flatfield correction. The correction is orientated as \"raw data\" not final data generated by get_fastccd_images().\u001b[39;00m\n\u001b[1;32m 289\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m--> 290\u001b[0m images \u001b[38;5;241m=\u001b[39m get_images_to_3D(\u001b[43mget_fastccd_images\u001b[49m\u001b[43m(\u001b[49m\u001b[43mlight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdark\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mflat\u001b[49m\u001b[43m)\u001b[49m)\n\u001b[1;32m 291\u001b[0m images \u001b[38;5;241m=\u001b[39m stackmean(images)\n\u001b[1;32m 292\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m half_interval:\n",
+ "File \u001b[0;32m/nsls2/software/common/jupyter/kernel_envs/beta/.pixi/envs/default/lib/python3.12/site-packages/csxtools/utils.py:74\u001b[0m, in \u001b[0;36mget_fastccd_images\u001b[0;34m(light_header, dark_headers, flat, gain, tag, roi)\u001b[0m\n\u001b[1;32m 72\u001b[0m logger\u001b[38;5;241m.\u001b[39mwarning(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mProcessing without dark images\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 73\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m---> 74\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[43mdark_headers\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m]\u001b[49m \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 75\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mNotImplementedError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUse of header metadata to find dark\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 76\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m images is not implemented yet.\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 78\u001b[0m \u001b[38;5;66;03m# Read the images for the dark headers\u001b[39;00m\n",
+ "File \u001b[0;32m/nsls2/software/common/jupyter/kernel_envs/beta/.pixi/envs/default/lib/python3.12/site-packages/databroker/v1.py:853\u001b[0m, in \u001b[0;36mHeader.__getitem__\u001b[0;34m(self, k)\u001b[0m\n\u001b[1;32m 851\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mgetattr\u001b[39m(\u001b[38;5;28mself\u001b[39m, k)\n\u001b[1;32m 852\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 853\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mKeyError\u001b[39;00m(k)\n",
+ "\u001b[0;31mKeyError\u001b[0m: 0"
+ ]
+ }
+ ],
+ "source": [
+ "scan_ff = db[202033]\n",
+ "scan_ff_dark = db[202032]\n",
+ "a=get_fastccd_flatfield(scan_ff, scan_ff_dark, )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {
+ "collapsed": true,
+ "jupyter": {
+ "outputs_hidden": true
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "\u001b[0;31mSignature:\u001b[0m\n",
+ "\u001b[0mget_fastccd_flatfield\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\u001b[0m\n",
+ "\u001b[0;34m\u001b[0m \u001b[0mlight\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
+ "\u001b[0;34m\u001b[0m \u001b[0mdark\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
+ "\u001b[0;34m\u001b[0m \u001b[0mflat\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
+ "\u001b[0;34m\u001b[0m \u001b[0mlimits\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0.6\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m1.4\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
+ "\u001b[0;34m\u001b[0m \u001b[0mhalf_interval\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
+ "\u001b[0;34m\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;31mDocstring:\u001b[0m\n",
+ "Calculate a flatfield from two headers \n",
+ "\n",
+ "This routine calculates the flatfield using the\n",
+ ":func:calculate_flatfield() function after obtaining the images from\n",
+ "the headers.\n",
+ "\n",
+ "Parameters\n",
+ "----------\n",
+ "light : databroker header\n",
+ " The header containing the light images\n",
+ "dark : databroker header(s)\n",
+ " The header(s) from the run containin the dark images. See get_fastccd_images for details\n",
+ "flat : flatfield image (optional)\n",
+ " The array to be used for the initial flatfield\n",
+ "limits : tuple limits used for returning corrected pixel flatfield\n",
+ " The tuple setting lower and upper bound. np.nan returned value is outside bounds\n",
+ "half_interval : boolean or tuple to perform calculation for only half of the FastCCD\n",
+ " Default is False. If True, then the hard-code portion is retained. Customize image \n",
+ " manipulation using a tuple of length 2 for (row_start, row_stop).\n",
+ "\n",
+ "\n",
+ "Returns\n",
+ "-------\n",
+ "array_like\n",
+ " Flatfield correction. The correction is orientated as \"raw data\" not final data generated by get_fastccd_images().\n",
+ "\u001b[0;31mFile:\u001b[0m /nsls2/software/common/jupyter/kernel_envs/beta/.pixi/envs/default/lib/python3.12/site-packages/csxtools/utils.py\n",
+ "\u001b[0;31mType:\u001b[0m function"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "get_fastccd_flatfield?"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {},
@@ -46,7 +240,10 @@
"cell_type": "code",
"execution_count": 2,
"metadata": {
- "collapsed": true
+ "collapsed": true,
+ "jupyter": {
+ "outputs_hidden": true
+ }
},
"outputs": [],
"source": [
@@ -65,7 +262,10 @@
"cell_type": "code",
"execution_count": 3,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [
{
@@ -84,7 +284,10 @@
"cell_type": "code",
"execution_count": 4,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [
{
@@ -116,7 +319,10 @@
"cell_type": "code",
"execution_count": 5,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [
{
@@ -156,7 +362,10 @@
"cell_type": "code",
"execution_count": 6,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [
{
@@ -177,7 +386,10 @@
"cell_type": "code",
"execution_count": 7,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [
{
@@ -218,7 +430,10 @@
"cell_type": "code",
"execution_count": 8,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [
{
@@ -241,7 +456,10 @@
"cell_type": "code",
"execution_count": 9,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [
{
@@ -263,7 +481,10 @@
"cell_type": "code",
"execution_count": 10,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [
{
@@ -306,7 +527,10 @@
"cell_type": "code",
"execution_count": 11,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [
{
@@ -336,7 +560,10 @@
"cell_type": "code",
"execution_count": 12,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [
{
@@ -371,7 +598,10 @@
"cell_type": "code",
"execution_count": 13,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [
{
@@ -393,9 +623,9 @@
],
"metadata": {
"kernelspec": {
- "display_name": "xf23id1-srv2 - Analysis Conda Env",
- "language": "",
- "name": "srv2-analysis-kernel"
+ "display_name": "Python 3 (beta)",
+ "language": "python",
+ "name": "python3_beta"
},
"language_info": {
"codemirror_mode": {
@@ -407,9 +637,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.5.0"
+ "version": "3.12.7"
}
},
"nbformat": 4,
- "nbformat_minor": 0
+ "nbformat_minor": 4
}
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..145d9bf
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,8 @@
+[build-system]
+requires = [
+ "setuptools>=61.0",
+ "wheel",
+ "numpy",
+ "versioneer[toml]>=0.28"
+]
+build-backend = "setuptools.build_meta"
diff --git a/run_tests.py b/run_tests.py
index 41c2499..81e8997 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -1,4 +1,5 @@
if __name__ == "__main__":
import pytest
import sys
+
sys.exit(pytest.main())
diff --git a/setup.cfg b/setup.cfg
index a1cd9c4..970f7bc 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -3,11 +3,6 @@ tag_build =
tag_svn_revision = 1
[tool:pytest]
-pep8ignore =
- _old/* ALL
- __init__.py ALL
- csxtools/image_corr.py ALL
- doc/conf.py ALL
[build_sphinx]
source-dir = doc/
diff --git a/setup.py b/setup.py
index 95e5056..8513a5a 100644
--- a/setup.py
+++ b/setup.py
@@ -1,29 +1,33 @@
from __future__ import absolute_import, division, print_function
import sys
-from distutils.core import Extension, setup
+from setuptools import Extension
from os import path
-import numpy as np
import setuptools
-
+from setuptools.command.build_ext import build_ext
import versioneer
-min_version = (3, 8)
-if sys.version_info < min_version:
- error = """
-bluesky-adaptive does not support Python {0}.{1}.
-Python {2}.{3} and above is required. Check your Python version like so:
-python3 --version
+# Custom build_ext to delay NumPy import and strip suffix
+class CustomBuildExt(build_ext):
+ def finalize_options(self):
+ super().finalize_options()
+ import numpy # <== DELAY numpy import until now
+
+ self.include_dirs.append(numpy.get_include())
+
+ def get_ext_filename(self, ext_name):
+ filename = super().get_ext_filename(ext_name)
+ return filename.split(".")[0] + ".so"
-This may be due to an out-of-date pip. Make sure you have pip >= 9.0.1.
-Upgrade pip like so:
-pip install --upgrade pip
-""".format(
- *(sys.version_info[:2] + min_version)
- )
+min_version = (3, 8)
+if sys.version_info < min_version:
+ error = f"""
+csxtools does not support Python {sys.version_info[0]}.{sys.version_info[1]}.
+Python {min_version[0]}.{min_version[1]} and above is required.
+"""
sys.exit(error)
here = path.abspath(path.dirname(__file__))
@@ -31,13 +35,13 @@
with open(path.join(here, "README.md"), encoding="utf-8") as readme_file:
readme = readme_file.read()
-
with open("requirements.txt") as f:
requirements = f.read().split()
with open("requirements-extras.txt") as f:
extras_require = {"complete": f.read().split()}
+# C extensions
fastccd = Extension(
"fastccd",
sources=["src/fastccdmodule.c", "src/fastccd.c"],
@@ -45,8 +49,18 @@
extra_link_args=["-lgomp"],
)
+axis1 = Extension(
+ "axis1",
+ sources=["src/axis1module.c", "src/axis1.c"],
+ extra_compile_args=["-fopenmp"],
+ extra_link_args=["-lgomp"],
+)
+
image = Extension(
- "image", sources=["src/imagemodule.c", "src/image.c"], extra_compile_args=["-fopenmp"], extra_link_args=["-lgomp"]
+ "image",
+ sources=["src/imagemodule.c", "src/image.c"],
+ extra_compile_args=["-fopenmp"],
+ extra_link_args=["-lgomp"],
)
phocount = Extension(
@@ -55,22 +69,26 @@
extra_compile_args=["-fopenmp"],
extra_link_args=["-lgomp"],
)
-setup(
+
+# Setup
+setuptools.setup(
name="csxtools",
version=versioneer.get_version(),
- cmdclass=versioneer.get_cmdclass(),
+ cmdclass={
+ **versioneer.get_cmdclass(),
+ "build_ext": CustomBuildExt,
+ },
author="Brookhaven National Laboratory",
- description="Python library for tools to be used at the Coherent Soft X-ray scattering (CSX) beamline at NSLS-II.",
+ description="""Python library for tools to be used at the Coherent Soft X-ray scattering (CSX)
+ beamline at NSLS-II.""",
packages=setuptools.find_packages(exclude=["src", "tests"]),
python_requires=">={}".format(".".join(str(n) for n in min_version)),
long_description=readme,
- long_description_content_type='text/markdown',
- ext_package="csxtools.ext",
- include_dirs=[np.get_include()],
- ext_modules=[fastccd, image, phocount],
- tests_require=["pytest"],
+ long_description_content_type="text/markdown",
install_requires=requirements,
extras_require=extras_require,
+ ext_package="csxtools.ext",
+ ext_modules=[fastccd, axis1, image, phocount],
url="https://github.com/NSLS-II-CSX/csxtools",
keywords="Xray Analysis",
license="BSD",
@@ -78,5 +96,6 @@
"Development Status :: 2 - Pre-Alpha",
"Natural Language :: English",
"Programming Language :: Python :: 3",
+ "Programming Language :: C",
],
)
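The `get_ext_filename` override exists because the default extension filename embeds the interpreter and platform tag; stripping everything after the first dot yields the bare `.so` names that `MANIFEST.in` collects via `recursive-include csxtools *.so`. A quick illustration (assumes CPython on Linux; the tag shown is an example):

    import sysconfig

    print(sysconfig.get_config_var("EXT_SUFFIX"))
    # e.g. '.cpython-312-x86_64-linux-gnu.so'
    name = "fastccd.cpython-312-x86_64-linux-gnu.so"
    print(name.split(".")[0] + ".so")  # -> 'fastccd.so'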
diff --git a/src/axis1.c b/src/axis1.c
new file mode 100644
index 0000000..a8933df
--- /dev/null
+++ b/src/axis1.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2014, Brookhaven Science Associates, Brookhaven
+ * National Laboratory. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * * Neither the name of the Brookhaven Science Associates, Brookhaven
+ * National Laboratory nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <math.h>
+#include <omp.h>
+
+#include "axis1.h"
+
+
+// Correct axis1 images by looping over all frames, subtracting the background,
+// applying the flatfield, and rotating each frame 90 degrees clockwise
+int correct_axis_images(uint16_t *in, data_t *out, data_t *bg, data_t *flat,
+ int ndims, index_t *dims) {
+    index_t nimages;
+    int n;
+
+ if (ndims == 2) {
+ nimages = 1;
+ } else {
+ nimages = dims[0];
+ for (n = 1; n < (ndims - 2); n++) {
+ nimages = nimages * dims[n];
+ }
+ }
+
+ index_t height = dims[ndims - 2]; // y
+ index_t width = dims[ndims - 1]; // x
+ index_t imsize = height * width;
+
+#pragma omp parallel for schedule(static)
+ for (index_t img = 0; img < nimages; img++) {
+ for (index_t y = 0; y < height; y++) {
+ for (index_t x = 0; x < width; x++) {
+ index_t in_idx = img * imsize + y * width + x;
+ index_t rot_x = height - 1 - y; // flip rows
+ index_t rot_y = x;
+ index_t out_idx = img * imsize + rot_y * height + rot_x; // (N, x, y) layout
+
+ data_t bg_val = bg[y * width + x];
+ data_t flat_val = flat[y * width + x];
+
+ if (in[in_idx]) {
+ out[out_idx] = flat_val * ((data_t)(in[in_idx]) - bg_val);
+ } else {
+ out[out_idx] = 0.0f;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+
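The index mapping in the loop (`out[x, H-1-y] = in[y, x]`) is a 90-degree clockwise rotation, i.e. `np.rot90(frame, k=-1)`. A small NumPy check of that equivalence; note that the rotated layout has shape `(W, H)`, while the Python wrapper below allocates the output with the input's dims, so the axis labels only line up for square frames:

    import numpy as np

    H, W = 4, 6
    a = np.arange(H * W).reshape(H, W)
    out = np.empty((W, H), dtype=a.dtype)
    for y in range(H):
        for x in range(W):
            out[x, H - 1 - y] = a[y, x]  # same mapping as correct_axis_images
    assert np.array_equal(out, np.rot90(a, k=-1))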
diff --git a/src/axis1.h b/src/axis1.h
new file mode 100644
index 0000000..b7e8585
--- /dev/null
+++ b/src/axis1.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2014, Brookhaven Science Associates, Brookhaven
+ * National Laboratory. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * * Neither the name of the Brookhaven Science Associates, Brookhaven
+ * National Laboratory nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _AXIS1_H
+#define _AXIS1_H
+
+// Use a size of long for big arrays
+typedef long index_t;
+typedef float data_t;
+
+int correct_axis_images(uint16_t *in, data_t *out, data_t *bg, data_t *flat,
+ int ndims, index_t *dims);
+#endif
diff --git a/src/axis1module.c b/src/axis1module.c
new file mode 100644
index 0000000..0ae92f0
--- /dev/null
+++ b/src/axis1module.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2014, Brookhaven Science Associates, Brookhaven
+ * National Laboratory. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * * Neither the name of the Brookhaven Science Associates, Brookhaven
+ * National Laboratory nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+
+/* Include python and numpy header files */
+#include <Python.h>
+#define NPY_NO_DEPRECATED_API NPY_1_9_API_VERSION
+#include <numpy/arrayobject.h>
+#include <numpy/ufuncobject.h>
+
+#include "axis1.h"
+
+static PyObject* axis1_correct_images(PyObject *self, PyObject *args){
+ PyObject *_input = NULL;
+ PyObject *_bgnd = NULL;
+ PyObject *_flat = NULL;
+ PyArrayObject *input = NULL;
+ PyArrayObject *bgnd = NULL;
+ PyArrayObject *flat = NULL;
+ PyArrayObject *out = NULL;
+ npy_intp *dims;
+ npy_intp *dims_bgnd;
+ npy_intp *dims_flat;
+ int ndims;
+
+ if(!PyArg_ParseTuple(args, "OOO", &_input, &_bgnd, &_flat)){
+ return NULL;
+ }
+
+    input = (PyArrayObject*)PyArray_FROMANY(_input, NPY_UINT16, 2, 0, NPY_ARRAY_IN_ARRAY);
+ if(!input){
+ goto error;
+ }
+
+ bgnd = (PyArrayObject*)PyArray_FROMANY(_bgnd, NPY_FLOAT, 2, 2, NPY_ARRAY_IN_ARRAY);
+ if(!bgnd){
+ goto error;
+ }
+
+    flat = (PyArrayObject*)PyArray_FROMANY(_flat, NPY_FLOAT, 2, 2, NPY_ARRAY_IN_ARRAY);
+ if(!flat){
+ goto error;
+ }
+
+ ndims = PyArray_NDIM(input);
+ dims = PyArray_DIMS(input);
+ dims_bgnd = PyArray_DIMS(bgnd);
+ dims_flat = PyArray_DIMS(flat);
+
+ // Check array dimensions for dark and flat
+ if(dims[ndims-2] != dims_bgnd[0] || dims[ndims-1] != dims_bgnd[1]){
+ PyErr_SetString(PyExc_ValueError, "Dimensions of image array do not match background array dimensions.");
+ goto error;
+ }
+ if(dims[ndims-2] != dims_flat[0] || dims[ndims-1] != dims_flat[1]){
+ PyErr_SetString(PyExc_ValueError, "Dimensions of image array do not match flat-field array dimensions.");
+ goto error;
+ }
+
+ out = (PyArrayObject*)PyArray_SimpleNew(ndims, dims, NPY_FLOAT);
+ if(!out){
+ goto error;
+ }
+
+ uint16_t* input_p = (uint16_t*)PyArray_DATA(input);
+ data_t *out_p = (data_t*)PyArray_DATA(out);
+ data_t *bgnd_p = (data_t*)PyArray_DATA(bgnd);
+ data_t *flat_p = (data_t*)PyArray_DATA(flat);
+
+ // Ok now we don't touch Python Object ... Release the GIL
+ Py_BEGIN_ALLOW_THREADS
+
+ correct_axis_images(input_p, out_p, bgnd_p, flat_p,
+ ndims, (index_t*)dims);
+
+ Py_END_ALLOW_THREADS
+
+ Py_XDECREF(input);
+ Py_XDECREF(bgnd);
+ Py_XDECREF(flat);
+ return Py_BuildValue("N", out);
+
+error:
+ Py_XDECREF(input);
+ Py_XDECREF(bgnd);
+ Py_XDECREF(out);
+ Py_XDECREF(flat);
+ return NULL;
+}
+
+static PyMethodDef AXIS1_Methods[] = {
+ { "correct_images_axis", axis1_correct_images, METH_VARARGS,
+ "Correct AXIS1 Images"},
+ {NULL, NULL, 0, NULL}
+};
+
+static struct PyModuleDef axis1module = {
+ PyModuleDef_HEAD_INIT,
+ "axis1", /* name of module */
+ NULL, /* module documentation, may be NULL */
+ -1, /* size of per-interpreter state of the module,
+ or -1 if the module keeps state in global variables. */
+ AXIS1_Methods
+};
+
+PyMODINIT_FUNC PyInit_axis1(void) {
+ PyObject *m;
+ m = PyModule_Create(&axis1module);
+ if(m == NULL){
+ return NULL;
+ }
+
+ import_array();
+ import_umath();
+
+ return m;
+}
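Because the wrapper coerces its arguments with `PyArray_FROMANY`, the dtypes are fixed: the image stack must be uint16 with at least two dimensions, and the background and flatfield must be exactly 2-D float32 arrays matching the last two image dimensions. A hedged usage sketch (assuming the built extension is importable from `csxtools.ext`, where `ext_package` places it):

    import numpy as np
    from csxtools.ext.axis1 import correct_images_axis

    frames = np.random.randint(0, 4096, (5, 8, 8)).astype(np.uint16)  # NPY_UINT16, >= 2-D
    bg = np.zeros((8, 8), dtype=np.float32)   # NPY_FLOAT, exactly 2-D
    flat = np.ones((8, 8), dtype=np.float32)  # NPY_FLOAT, exactly 2-D

    corrected = correct_images_axis(frames, bg, flat)  # float32; zero-valued pixels stay 0.0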
diff --git a/tests/test_fastccd.py b/tests/test_fastccd.py
index 7d4b496..e581683 100644
--- a/tests/test_fastccd.py
+++ b/tests/test_fastccd.py
@@ -1,7 +1,10 @@
import numpy as np
from csxtools.fastccd import correct_images, photon_count
-from numpy.testing import (assert_array_max_ulp, assert_array_equal,
- assert_array_almost_equal)
+from numpy.testing import (
+ assert_array_max_ulp,
+ assert_array_equal,
+ assert_array_almost_equal,
+)
def test_correct_images():
@@ -19,24 +22,32 @@ def test_correct_images():
def test_photon_count():
- x = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
- [0, 0, 0, 0, 0, 4, 3, 0],
- [0, 0, 0, 10, 0, 4, 0, 0],
- [0, 0, 4, 6, 2, 0, 0, 0],
- [0, 0, 0, 0, 0, 0, 0, 0],
- [0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float32)
+ x = np.array(
+ [
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 4, 3, 0],
+ [0, 0, 0, 10, 0, 4, 0, 0],
+ [0, 0, 4, 6, 2, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ ],
+ dtype=np.float32,
+ )
nsum = 3
y = np.zeros_like(x)
y[2, 3] = 20
z = np.zeros_like(x)
- z[2, 3] = np.std(np.array([10, 6, 4, 2, 0, 0, 0, 0, 0],
- dtype=np.float32)[:nsum])
-
- op = photon_count(np.array([x, x, x], dtype=np.float32),
- thresh=(5, 13), mean_filter=(10, 30),
- std_filter=(0, 100), nsum=nsum)
+ z[2, 3] = np.std(np.array([10, 6, 4, 2, 0, 0, 0, 0, 0], dtype=np.float32)[:nsum])
+
+ op = photon_count(
+ np.array([x, x, x], dtype=np.float32),
+ thresh=(5, 13),
+ mean_filter=(10, 30),
+ std_filter=(0, 100),
+ nsum=nsum,
+ )
assert_array_equal(op[0], np.array([y, y, y]))
assert_array_almost_equal(op[1], np.array([z, z, z]), decimal=6)
diff --git a/tests/test_image.py b/tests/test_image.py
index bc4944d..b24df9d 100644
--- a/tests/test_image.py
+++ b/tests/test_image.py
@@ -1,16 +1,24 @@
-from csxtools.image import (rotate90, stackmean, stacksum, stackstd,
- stackvar, stackstderr, images_mean, images_sum)
+from csxtools.image import (
+ rotate90,
+ stackmean,
+ stacksum,
+ stackstd,
+ stackvar,
+ stackstderr,
+ images_mean,
+ images_sum,
+)
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
def test_rotate90():
- x = np.arange(4*20, dtype=np.float32).reshape(4, 20)
- y = rotate90(np.array([x, x, x, x]), 'ccw')
+ x = np.arange(4 * 20, dtype=np.float32).reshape(4, 20)
+ y = rotate90(np.array([x, x, x, x]), "ccw")
for i in y:
assert_array_equal(i, np.rot90(x, 1))
- y = rotate90(np.array([x, x, x, x]), 'cw')
+ y = rotate90(np.array([x, x, x, x]), "cw")
for i in y:
assert_array_equal(i, np.rot90(x, -1))
@@ -52,45 +60,68 @@ def test_stacksum():
x[23] = np.nan
x[40] = np.nan
m, n = stacksum(x)
- assert_array_almost_equal(m, np.ones((100, 100), dtype=np.float32) * 2000,
- decimal=3)
+ assert_array_almost_equal(
+ m, np.ones((100, 100), dtype=np.float32) * 2000, decimal=3
+ )
assert_array_equal(n, np.ones((100, 100), dtype=np.float32) * (1000 - 3))
def test_stackstd():
- x = np.repeat(np.arange(1000, dtype=np.float32), 400).reshape(
- (1000, 20, 20))
+ x = np.repeat(np.arange(1000, dtype=np.float32), 400).reshape((1000, 20, 20))
m, n = stackstd(x)
assert_array_almost_equal(m, np.std(x, axis=0), 2)
assert_array_equal(n, np.ones((20, 20), dtype=np.float32) * 1000.0)
def test_stackvar():
- x = np.repeat(np.arange(1000, dtype=np.float32), 400).reshape(
- (1000, 20, 20))
+ x = np.repeat(np.arange(1000, dtype=np.float32), 400).reshape((1000, 20, 20))
m, n = stackvar(x)
assert_array_almost_equal(m, np.var(x, axis=0), 0)
assert_array_equal(n, np.ones((20, 20), dtype=np.float32) * 1000.0)
def test_stackstderr():
- x = np.repeat(np.arange(1000, dtype=np.float32), 400).reshape(
- (1000, 20, 20))
+ x = np.repeat(np.arange(1000, dtype=np.float32), 400).reshape((1000, 20, 20))
m, n = stackstderr(x)
assert_array_almost_equal(m, np.std(x, axis=0) / np.sqrt(n), 3)
assert_array_equal(n, np.ones((20, 20), dtype=np.float32) * 1000.0)
def test_images_mean():
- x = np.array([np.repeat(ii*np.ones(ii*100, dtype=np.float32), 400).reshape(
- (ii*100, 20, 20)) for ii in range(1, 11)])
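+ # A plain list is used here: the per-index stacks have different lengths,
+ # so they cannot be packed into a single rectangular ndarray.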
+ x = [
+ np.repeat(ii * np.ones(ii * 100, dtype=np.float32), 400).reshape(
+ (ii * 100, 20, 20)
+ )
+ for ii in range(1, 11)
+ ]
m = images_mean(x)
assert_array_equal(m, np.array([np.mean(x1) for x1 in x]), 3)
def test_images_sum():
- x = np.array([np.repeat(ii*np.ones(ii*100, dtype=np.float32), 400).reshape(
- (ii*100, 20, 20)) for ii in range(1, 11)])
+ x = [
+ np.repeat(ii * np.ones(ii * 100, dtype=np.float32), 400).reshape(
+ (ii * 100, 20, 20)
+ )
+ for ii in range(1, 11)
+ ]
m = images_sum(x)
- assert_array_equal(m, np.array([np.sum(np.mean(x1, axis=0))
- for x1 in x]), 3)
+ assert_array_equal(m, np.array([np.sum(np.mean(x1, axis=0)) for x1 in x]))
diff --git a/versioneer.py b/versioneer.py
index 5db821a..14e2960 100644
--- a/versioneer.py
+++ b/versioneer.py
@@ -1,4 +1,3 @@
-
# Version: 0.15
"""
@@ -340,6 +339,7 @@
"""
from __future__ import print_function
+
try:
import configparser
except ImportError:
@@ -368,11 +368,13 @@ def get_root():
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
- err = ("Versioneer was unable to run the project root directory. "
- "Versioneer requires setup.py to be executed from "
- "its immediate directory (like 'python setup.py COMMAND'), "
- "or in a way that lets it use sys.argv[0] to find the root "
- "(like 'python path/to/setup.py COMMAND').")
+ err = (
+ "Versioneer was unable to run the project root directory. "
+ "Versioneer requires setup.py to be executed from "
+ "its immediate directory (like 'python setup.py COMMAND'), "
+ "or in a way that lets it use sys.argv[0] to find the root "
+ "(like 'python path/to/setup.py COMMAND')."
+ )
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
@@ -383,8 +385,10 @@ def get_root():
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
- print("Warning: build in %s is using versioneer.py from %s"
- % (os.path.dirname(me), versioneer_py))
+ print(
+ "Warning: build in %s is using versioneer.py from %s"
+ % (os.path.dirname(me), versioneer_py)
+ )
except NameError:
pass
return root
@@ -404,6 +408,7 @@ def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
+
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
@@ -418,6 +423,7 @@ def get(parser, name):
class NotThisMethod(Exception):
pass
+
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
@@ -429,6 +435,7 @@ def decorate(f):
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
+
return decorate
@@ -439,9 +446,12 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
- p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
- stderr=(subprocess.PIPE if hide_stderr
- else None))
+ p = subprocess.Popen(
+ [c] + args,
+ cwd=cwd,
+ stdout=subprocess.PIPE,
+ stderr=(subprocess.PIPE if hide_stderr else None),
+ )
break
except EnvironmentError:
e = sys.exc_info()[1]
@@ -463,7 +473,11 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
print("unable to run %s (error)" % dispcmd)
return None
return stdout
-LONG_VERSION_PY['git'] = '''
+
+
+LONG_VERSION_PY[
+ "git"
+] = """
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
@@ -923,7 +937,7 @@ def get_versions():
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
-'''
+"""
@register_vcs_handler("git", "get_keywords")
@@ -963,7 +977,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
- tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
+ tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
@@ -972,27 +986,32 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
- tags = set([r for r in refs if re.search(r'\d', r)])
+ tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
- print("discarding '%s', no digits" % ",".join(refs-tags))
+ print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
- r = ref[len(tag_prefix):]
+ r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
- return {"version": r,
- "full-revisionid": keywords["full"].strip(),
- "dirty": False, "error": None
- }
+ return {
+ "version": r,
+ "full-revisionid": keywords["full"].strip(),
+ "dirty": False,
+ "error": None,
+ }
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
- return {"version": "0+unknown",
- "full-revisionid": keywords["full"].strip(),
- "dirty": False, "error": "no suitable tags"}
+ return {
+ "version": "0+unknown",
+ "full-revisionid": keywords["full"].strip(),
+ "dirty": False,
+ "error": "no suitable tags",
+ }
@register_vcs_handler("git", "pieces_from_vcs")
@@ -1012,9 +1031,9 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
- describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
- "--always", "--long"],
- cwd=root)
+ describe_out = run_command(
+ GITS, ["describe", "--tags", "--dirty", "--always", "--long"], cwd=root
+ )
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
@@ -1037,17 +1056,16 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
- git_describe = git_describe[:git_describe.rindex("-dirty")]
+ git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
- mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
+ mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
- pieces["error"] = ("unable to parse git-describe output: '%s'"
- % describe_out)
+ pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
@@ -1056,10 +1074,12 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
- pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
- % (full_tag, tag_prefix))
+ pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
+ full_tag,
+ tag_prefix,
+ )
return pieces
- pieces["closest-tag"] = full_tag[len(tag_prefix):]
+ pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
@@ -1070,8 +1090,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
else:
# HEX: no tags
pieces["closest-tag"] = None
- count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
- cwd=root)
+ count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
@@ -1116,12 +1135,18 @@ def versions_from_parentdir(parentdir_prefix, root, verbose):
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
- print("guessing rootdir is '%s', but '%s' doesn't start with "
- "prefix '%s'" % (root, dirname, parentdir_prefix))
+ print(
+ "guessing rootdir is '%s', but '%s' doesn't start with "
+ "prefix '%s'" % (root, dirname, parentdir_prefix)
+ )
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
- return {"version": dirname[len(parentdir_prefix):],
- "full-revisionid": None,
- "dirty": False, "error": None}
+ return {
+ "version": dirname[len(parentdir_prefix) :],
+ "full-revisionid": None,
+ "dirty": False,
+ "error": None,
+ }
+
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.15) from
@@ -1148,8 +1173,9 @@ def versions_from_file(filename):
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
- mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
- contents, re.M | re.S)
+ mo = re.search(
+ r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S
+ )
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
@@ -1157,8 +1183,7 @@ def versions_from_file(filename):
def write_to_version_file(filename, versions):
os.unlink(filename)
- contents = json.dumps(versions, sort_keys=True,
- indent=1, separators=(",", ": "))
+ contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
@@ -1188,8 +1213,7 @@ def render_pep440(pieces):
rendered += ".dirty"
else:
# exception #1
- rendered = "0+untagged.%d.g%s" % (pieces["distance"],
- pieces["short"])
+ rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
@@ -1296,10 +1320,12 @@ def render_git_describe_long(pieces):
def render(pieces, style):
if pieces["error"]:
- return {"version": "unknown",
- "full-revisionid": pieces.get("long"),
- "dirty": None,
- "error": pieces["error"]}
+ return {
+ "version": "unknown",
+ "full-revisionid": pieces.get("long"),
+ "dirty": None,
+ "error": pieces["error"],
+ }
if not style or style == "default":
style = "pep440" # the default
@@ -1319,8 +1345,12 @@ def render(pieces, style):
else:
raise ValueError("unknown style '%s'" % style)
- return {"version": rendered, "full-revisionid": pieces["long"],
- "dirty": pieces["dirty"], "error": None}
+ return {
+ "version": rendered,
+ "full-revisionid": pieces["long"],
+ "dirty": pieces["dirty"],
+ "error": None,
+ }
class VersioneerBadRootError(Exception):
@@ -1341,8 +1371,9 @@ def get_versions(verbose=False):
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
- assert cfg.versionfile_source is not None, \
- "please set versioneer.versionfile_source"
+ assert (
+ cfg.versionfile_source is not None
+ ), "please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
@@ -1396,8 +1427,12 @@ def get_versions(verbose=False):
if verbose:
print("unable to compute version")
- return {"version": "0+unknown", "full-revisionid": None,
- "dirty": None, "error": "unable to compute version"}
+ return {
+ "version": "0+unknown",
+ "full-revisionid": None,
+ "dirty": None,
+ "error": "unable to compute version",
+ }
def get_version():
@@ -1443,6 +1478,7 @@ def run(self):
print(" dirty: %s" % vers.get("dirty"))
if vers["error"]:
print(" error: %s" % vers["error"])
+
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
@@ -1466,10 +1502,10 @@ def run(self):
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
- target_versionfile = os.path.join(self.build_lib,
- cfg.versionfile_build)
+ target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
+
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
@@ -1488,13 +1524,17 @@ def run(self):
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
- f.write(LONG %
- {"DOLLAR": "$",
- "STYLE": cfg.style,
- "TAG_PREFIX": cfg.tag_prefix,
- "PARENTDIR_PREFIX": cfg.parentdir_prefix,
- "VERSIONFILE_SOURCE": cfg.versionfile_source,
- })
+ f.write(
+ LONG
+ % {
+ "DOLLAR": "$",
+ "STYLE": cfg.style,
+ "TAG_PREFIX": cfg.tag_prefix,
+ "PARENTDIR_PREFIX": cfg.parentdir_prefix,
+ "VERSIONFILE_SOURCE": cfg.versionfile_source,
+ }
+ )
+
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
@@ -1522,8 +1562,10 @@ def make_release_tree(self, base_dir, files):
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
- write_to_version_file(target_versionfile,
- self._versioneer_generated_versions)
+ write_to_version_file(
+ target_versionfile, self._versioneer_generated_versions
+ )
+
cmds["sdist"] = cmd_sdist
return cmds
@@ -1577,11 +1619,13 @@ def do_setup():
root = get_root()
try:
cfg = get_config_from_root(root)
- except (EnvironmentError, configparser.NoSectionError,
- configparser.NoOptionError) as e:
+ except (
+ EnvironmentError,
+ configparser.NoSectionError,
+ configparser.NoOptionError,
+ ) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
- print("Adding sample versioneer config to setup.cfg",
- file=sys.stderr)
+ print("Adding sample versioneer config to setup.cfg", file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
@@ -1590,15 +1634,18 @@ def do_setup():
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
- f.write(LONG % {"DOLLAR": "$",
- "STYLE": cfg.style,
- "TAG_PREFIX": cfg.tag_prefix,
- "PARENTDIR_PREFIX": cfg.parentdir_prefix,
- "VERSIONFILE_SOURCE": cfg.versionfile_source,
- })
-
- ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
- "__init__.py")
+ f.write(
+ LONG
+ % {
+ "DOLLAR": "$",
+ "STYLE": cfg.style,
+ "TAG_PREFIX": cfg.tag_prefix,
+ "PARENTDIR_PREFIX": cfg.parentdir_prefix,
+ "VERSIONFILE_SOURCE": cfg.versionfile_source,
+ }
+ )
+
+ ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
@@ -1640,8 +1687,10 @@ def do_setup():
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
- print(" appending versionfile_source ('%s') to MANIFEST.in" %
- cfg.versionfile_source)
+ print(
+ " appending versionfile_source ('%s') to MANIFEST.in"
+ % cfg.versionfile_source
+ )
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
@@ -1689,6 +1738,7 @@ def scan_setup_py():
errors += 1
return errors
+
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":