From ad269ddcf03deee6d791288dba84f604b88ad738 Mon Sep 17 00:00:00 2001 From: Ryan Mueller Date: Wed, 14 Jan 2026 15:58:40 -0500 Subject: [PATCH 1/2] Speeding up cropping logic --- src/RawHandler/RawHandlerRawpy.py | 41 ++++++++++++++++++++----------- src/RawHandler/utils.py | 29 +++++++++++++--------- 2 files changed, 43 insertions(+), 27 deletions(-) diff --git a/src/RawHandler/RawHandlerRawpy.py b/src/RawHandler/RawHandlerRawpy.py index 7b3854d..7bc27a6 100644 --- a/src/RawHandler/RawHandlerRawpy.py +++ b/src/RawHandler/RawHandlerRawpy.py @@ -134,21 +134,32 @@ def apply_colorspace_transform( def compute_mask_and_sparse( self, dims=None, safe_crop=0, divide_by_wl=True ) -> Tuple[np.ndarray, np.ndarray]: - sparse, mask = sparse_representation_and_mask( - self.rawpy_object.raw_image_visible, self.core_metadata.raw_pattern - ) - if divide_by_wl: - sparse = sparse / self.core_metadata.white_level - if dims is not None: - h1, h2, w1, w2 = dims - if safe_crop: - h1, h2, w1, w2 = list( - map(lambda x: x - x % safe_crop, [h1, h2, w1, w2]) - ) - return sparse[:, h1:h2, w1:w2], mask[:, h1:h2, w1:w2] - else: - return sparse, mask - + raw_img = self.rawpy_object.raw_image_visible + + if dims is not None: + h1, h2, w1, w2 = dims + if safe_crop: + # Replaced lambda/map with bitwise/integer math for speed + h1 -= h1 % safe_crop + h2 -= h2 % safe_crop + w1 -= w1 % safe_crop + w2 -= w2 % safe_crop + raw_img = raw_img[h1:h2, w1:w2] + # Roll the pattern to align with crop + pattern = np.roll(self.core_metadata.raw_pattern, shift=(-h1, -w1), axis=(0, 1)) + # Compute sparse representation on the (potentially smaller) image + sparse, mask = sparse_representation_and_mask( + raw_img, pattern + ) + + # Scale by white level + if divide_by_wl: + # Multiply by reciprocal is often faster than division + sparse = sparse * (1.0 / self.core_metadata.white_level) + + return sparse, mask + + def downsize( self, min_preview_size=256, colorspace=None, clip=False, safe_crop=0 ) -> np.ndarray: diff 
def sparse_representation_and_mask(cfa, pattern):
    """Expand a CFA mosaic into a 3-channel sparse image plus a sample mask.

    Parameters
    ----------
    cfa : np.ndarray
        2-D mosaic of shape (H, W).
    pattern : np.ndarray
        Small CFA pattern of channel indices (e.g. 2x2 Bayer), where 0=R,
        1=G, 2=B and 3 marks the second green site.

    Returns
    -------
    tuple
        sparse : (3, H, W) array, each sample placed in its channel plane
            and zeros elsewhere (same dtype as ``cfa``).
        mask : (3, H, W) uint8 array, 1 where a sample exists.
    """
    H, W = cfa.shape
    ph, pw = pattern.shape

    # Fold the second green site (3) into the green channel (1). Work on a
    # copy: the previous in-place `pattern[pattern == 3] = 1` mutated the
    # caller's array (often camera metadata such as raw_pattern).
    pattern = np.where(pattern == 3, 1, pattern)

    sparse = np.zeros((3, H, W), dtype=cfa.dtype)
    mask = np.zeros((3, H, W), dtype=np.uint8)

    # Tile the pattern to cover the full image, then trim the overhang.
    full_pattern = np.tile(pattern, (H // ph + 1, W // pw + 1))[:H, :W]

    # Vectorized scatter: one boolean plane per channel (R, G, B).
    for ch in range(3):
        ch_mask = full_pattern == ch
        mask[ch] = ch_mask
        sparse[ch] = cfa * ch_mask

    return sparse, mask
if dims is not None: - h1, h2, w1, w2 = dims - if safe_crop: - # Replaced lambda/map with bitwise/integer math for speed - h1 -= h1 % safe_crop - h2 -= h2 % safe_crop - w1 -= w1 % safe_crop - w2 -= w2 % safe_crop - raw_img = raw_img[h1:h2, w1:w2] - # Roll the pattern to align with crop - pattern = np.roll(self.core_metadata.raw_pattern, shift=(-h1, -w1), axis=(0, 1)) - # Compute sparse representation on the (potentially smaller) image - sparse, mask = sparse_representation_and_mask( - raw_img, pattern - ) - - # Scale by white level - if divide_by_wl: - # Multiply by reciprocal is often faster than division - sparse = sparse * (1.0 / self.core_metadata.white_level) - - return sparse, mask - - + raw_img = self.rawpy_object.raw_image_visible + + if dims is not None: + h1, h2, w1, w2 = dims + if safe_crop: + # Replaced lambda/map with bitwise/integer math for speed + h1 -= h1 % safe_crop + h2 -= h2 % safe_crop + w1 -= w1 % safe_crop + w2 -= w2 % safe_crop + raw_img = raw_img[h1:h2, w1:w2] + # Roll the pattern to align with crop + pattern = np.roll( + self.core_metadata.raw_pattern, shift=(-h1, -w1), axis=(0, 1) + ) + # Compute sparse representation on the (potentially smaller) image + sparse, mask = sparse_representation_and_mask(raw_img, pattern) + + # Scale by white level + if divide_by_wl: + # Multiply by reciprocal is often faster than division + sparse = sparse * (1.0 / self.core_metadata.white_level) + + return sparse, mask + def downsize( self, min_preview_size=256, colorspace=None, clip=False, safe_crop=0 ) -> np.ndarray: diff --git a/src/RawHandler/utils.py b/src/RawHandler/utils.py index acc563f..6e5c4f5 100644 --- a/src/RawHandler/utils.py +++ b/src/RawHandler/utils.py @@ -343,24 +343,23 @@ def sparse_representation(cfa, pattern="RGGB", cfa_type="bayer"): sparse[ch, i::6, j::6] = cfa[i::6, j::6] return sparse + def sparse_representation_and_mask(cfa, pattern): H, W = cfa.shape ph, pw = pattern.shape # If two green channels, set both to 1 - 
def sparse_representation_and_mask(cfa, pattern):
    """Expand a CFA mosaic into a 3-channel sparse image plus a sample mask.

    Parameters
    ----------
    cfa : np.ndarray
        2-D mosaic of shape (H, W).
    pattern : np.ndarray
        Small CFA pattern of channel indices (e.g. 2x2 Bayer), where 0=R,
        1=G, 2=B and 3 marks the second green site.

    Returns
    -------
    tuple
        sparse : (3, H, W) array, each sample placed in its channel plane
            and zeros elsewhere (same dtype as ``cfa``).
        mask : (3, H, W) uint8 array, 1 where a sample exists.
    """
    H, W = cfa.shape
    ph, pw = pattern.shape

    # Fold the second green site (3) into the green channel (1). Work on a
    # copy: the previous in-place `pattern[pattern == 3] = 1` mutated the
    # caller's array (often camera metadata such as raw_pattern).
    pattern = np.where(pattern == 3, 1, pattern)

    sparse = np.zeros((3, H, W), dtype=cfa.dtype)
    mask = np.zeros((3, H, W), dtype=np.uint8)

    # Tile the pattern to cover the full image, then trim the overhang.
    full_pattern = np.tile(pattern, (H // ph + 1, W // pw + 1))[:H, :W]

    # Vectorized scatter: one boolean plane per channel (R, G, B).
    for ch in range(3):
        ch_mask = full_pattern == ch
        mask[ch] = ch_mask
        sparse[ch] = cfa * ch_mask

    return sparse, mask