
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Source: main

Target: QIM/tools/3dim-reconstruction (main)

Commits on Source: 2

@@ -4,6 +4,7 @@ import argparse
import re
from multiprocessing import Pool
import itertools
import math
import zarr
import numpy as np
@@ -16,6 +17,11 @@ from cil.processors import TransmissionAbsorptionConverter
from cil.recon import FDK
from cil.utilities.display import show2D, show_geometry
from cyclopts import App, Parameter
from typing import Annotated
app = App()
def create_reader(file_name, roi=None):
if file_name.endswith('.txrm'):
DataReader = ZEISSDataReader
@@ -43,7 +49,7 @@ def save_center_slice(arr, path):
plt.close()
class ChunkedCT:
def __init__(self, config_path, tiff_dir, proj_chunk_size, vol_chunks, batch_size=20, processes=None, verbose=1):
def __init__(self, config_path, tiff_dir, output_dir, angle_chunks, vol_chunks, read_batch_size=20, processes=None, verbose=None):
"""
Perform CT reconstruction by chunking the projections and reconstruction volume.
@@ -51,36 +57,44 @@ class ChunkedCT:
Parameters:
config_path: Path to CT config file.
tiff_dir: Directory where all tiffs are assumed to be projections.
proj_chunk_size: Number of projections per projection chunk.
tiff_dir: Directory where all TIFFs are assumed to be projections. Defaults to parent of config_path.
angle_chunks: Number of chunks the projections are split into along the angle dimension.
vol_chunks: Tuple with the number of chunks along each axis, in Z, Y, X order. Must be divisors of the corresponding default ig dimension lengths.
batch_size: Number of TIFFs per worker at a time during parallelized reading.
read_batch_size: Number of TIFFs per worker at a time during parallelized reading.
processes: Number of workers during parallelized reading. Default None uses all available CPUs.
verbose: Whether center slices from the reconstructions should be saved along the way to visualize progress. Defaults to 1.
Usage:
self.write_projection_chunks() followed by self.reconstruct()
The former may be omitted if the projection chunks are already prepared.
"""
self.config_path = config_path
if tiff_dir is None:
self.tiff_dir = Path(self.config_path).parent
else:
self.tiff_dir = tiff_dir
self.set_tiff_paths()
self.output_dir = Path(output_dir)
self.angle_chunks = angle_chunks
self.vol_chunks = vol_chunks
self.proj_chunk_size = proj_chunk_size
self.read_batch_size = read_batch_size
self.processes = processes
self.batch_size = batch_size
os.makedirs('out', exist_ok=True)
self.recon_zarr_path = 'out/recon.zarr'
self.proj_zarr_path = 'out/projections.zarr'
self.verbose = verbose
os.makedirs(self.output_dir, exist_ok=True)
self.recon_zarr_path = self.output_dir / 'recon.zarr'
self.proj_zarr_path = self.output_dir / 'projections.zarr'
self.verbose = 1 if verbose is None else verbose
if self.verbose:
os.makedirs('out/center', exist_ok=True)
os.makedirs(self.output_dir / 'center', exist_ok=True)
self.ag = create_reader(config_path).get_geometry()
self.ag = create_reader(self.config_path).get_geometry()
self.ig = self.ag.get_ImageGeometry()
self.proj_shape = self.ag.shape
if not all(self.ig.shape[i] % self.vol_chunks[i] == 0 for i in range(3)):
raise ValueError(f"ig shape not divisible by vol_chunks in all dimensions")
self.vol_chunk_shape = tuple(self.ig.shape[i] // self.vol_chunks[i] for i in range(3))
self.angle_chunk_size = math.ceil(self.proj_shape[0] / self.angle_chunks)
self.vol_chunk_shape = tuple(math.ceil(self.ig.shape[i] / self.vol_chunks[i]) for i in range(3))
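Because angle_chunk_size and vol_chunk_shape are now computed with math.ceil, the last chunk along an axis can be shorter than the rest; a minimal sketch of that arithmetic with made-up sizes (not the scan in this diff):

import math

n_proj, angle_chunks = 3201, 4                       # illustrative values only
angle_chunk_size = math.ceil(n_proj / angle_chunks)  # 801
# process_chunk clips the last chunk with min(..., n_proj):
sizes = [min((i + 1) * angle_chunk_size, n_proj) - i * angle_chunk_size
         for i in range(angle_chunks)]
print(angle_chunk_size, sizes)                       # 801 [801, 801, 801, 798]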
def set_tiff_paths(self):
"""
@@ -96,7 +110,7 @@ class ChunkedCT:
"""
return [atoi(c) for c in re.split(r'(\d+)', text)]
tiff_paths = [f for f in os.listdir(self.tiff_dir) if f.endswith(".tif")]
tiff_paths = [f for f in os.listdir(self.tiff_dir) if f.endswith(('.tif', '.tiff'))]
tiff_paths.sort(key=natural_keys)
self.tiff_paths = [os.path.join(self.tiff_dir, f) for f in tiff_paths]
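The natural-sort key keeps numerically indexed projections in acquisition order even when the file names are not zero-padded; a quick self-contained check mirroring the helpers above (file names are hypothetical):

import re

def atoi(text):
    return int(text) if text.isdigit() else text

def natural_keys(text):
    return [atoi(c) for c in re.split(r'(\d+)', text)]

names = ['proj_2.tif', 'proj_10.tif', 'proj_1.tif']
print(sorted(names))                    # ['proj_1.tif', 'proj_10.tif', 'proj_2.tif']
print(sorted(names, key=natural_keys))  # ['proj_1.tif', 'proj_2.tif', 'proj_10.tif']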
@@ -105,25 +119,24 @@ class ChunkedCT:
return ski.io.imread(tiff_path)
def process_chunk(self, chunk_index, pool):
start = chunk_index * self.proj_chunk_size
end = min((chunk_index + 1) * self.proj_chunk_size, self.proj_shape[0])
start = chunk_index * self.angle_chunk_size
end = min((chunk_index + 1) * self.angle_chunk_size, self.proj_shape[0])
chunk = np.stack(pool.map(
ChunkedCT.read_tiff,
self.tiff_paths[start:end],
chunksize=self.batch_size
chunksize=self.read_batch_size
), axis=0)
expected_shape = (end - start, *self.proj_shape[1:])
if chunk.shape != expected_shape:
raise ValueError(f'Chunk shape mismatch: expected {expected_shape}, got {chunk.shape}')
zarr_array = zarr.open_array(store=self.proj_zarr_path, mode='a')
zarr_array[start:end, :, :] = chunk
proj_zarr = zarr.open_array(store=self.proj_zarr_path, mode='a')
proj_zarr.blocks[chunk_index] = chunk
print(f'Finished writing projection chunk {chunk_index}.')
def write_projection_chunks(self):
nchunks = (self.proj_shape[0] - 1) // self.proj_chunk_size + 1
# compressor = zarr.codecs.Blosc(cname='zstd')
zarr.open_array(
@@ -131,22 +144,22 @@ class ChunkedCT:
mode='w',
dtype=np.uint16,
shape=self.proj_shape,
chunks=(self.proj_chunk_size, *self.proj_shape[1:]),
chunks=(self.angle_chunk_size, *self.proj_shape[1:]),
compressor=None
)
with Pool(self.processes) as pool:
print(f'processes={pool._processes}')
for i in range(nchunks):
for i in range(self.angle_chunks):
self.process_chunk(i, pool)
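Writing with proj_zarr.blocks keeps each projection chunk aligned to exactly one zarr chunk, so no chunk is touched by two different iterations. A minimal sketch, assuming the installed zarr exposes block indexing via .blocks as the code above does; the array name and sizes are made up:

import numpy as np
import zarr

z = zarr.open_array('demo_projections.zarr', mode='w',
                    shape=(10, 4, 5), chunks=(4, 4, 5),
                    dtype=np.uint16, compressor=None)
z.blocks[0] = np.ones((4, 4, 5), dtype=np.uint16)  # same region as z[0:4]
z.blocks[2] = np.ones((2, 4, 5), dtype=np.uint16)  # trailing block clipped to rows 8:10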
def reconstruct_projection_chunk(self, ig, proj_chunk_index):
start = proj_chunk_index * self.proj_chunk_size
end = min((proj_chunk_index + 1) * self.proj_chunk_size, self.ag.shape[0])
start = proj_chunk_index * self.angle_chunk_size
end = min((proj_chunk_index + 1) * self.angle_chunk_size, self.ag.shape[0])
ag_chunk = self.ag.copy().set_angles(self.ag.angles[start:end])
zarr_array = zarr.open_array(store=self.proj_zarr_path, mode='r')
chunk = zarr_array[start:end, :, :].astype(np.float32) / (2**16 - 1) # here assuming TIFFs are uint16
proj_zarr = zarr.open_array(store=self.proj_zarr_path, mode='r')
chunk = proj_zarr.blocks[proj_chunk_index].astype(np.float32) / (2**16 - 1) # here assuming TIFFs are uint16
data_chunk = AcquisitionData(array=chunk, deep_copy=False, geometry=ag_chunk)
data_chunk = TransmissionAbsorptionConverter()(data_chunk)
@@ -172,29 +185,23 @@ class ChunkedCT:
def reconstruct_volume_chunk(self, chunk_coords):
ig_chunk = self.make_ig_chunk(chunk_coords)
num_proj_chunks = (self.proj_shape[0] - 1) // self.proj_chunk_size + 1
vol_accumulated = np.zeros(ig_chunk.shape, dtype=np.float32)
for proj_chunk_index in range(num_proj_chunks):
for proj_chunk_index in range(self.angle_chunks):
recon_chunk = self.reconstruct_projection_chunk(ig_chunk, proj_chunk_index)
vol_accumulated += recon_chunk.as_array()
if self.verbose:
str_chunk = '_'.join(str(i) for i in chunk_coords)
save_center_slice(recon_chunk.as_array(), f'out/center/recon_vol_{str_chunk}_proj_{proj_chunk_index}.png')
save_center_slice(recon_chunk.as_array(), self.output_dir / f'center/recon_vol_{str_chunk}_proj_{proj_chunk_index}.png')
if self.verbose:
save_center_slice(vol_accumulated, f'out/center/recon_{str_chunk}_accumulated.png')
save_center_slice(vol_accumulated, self.output_dir / f'center/recon_{str_chunk}_accumulated.png')
zarr_array = zarr.open_array(store=self.recon_zarr_path, mode='a')
start = [s*c for s, c in zip(self.vol_chunk_shape, chunk_coords)]
end = [s*(c+1) for s, c in zip(self.vol_chunk_shape, chunk_coords)]
zarr_array[start[0]:end[0], start[1]:end[1], start[2]:end[2]] = vol_accumulated
recon_zarr = zarr.open_array(store=self.recon_zarr_path, mode='a')
recon_zarr.blocks[*chunk_coords] = vol_accumulated
print(f'Finished writing volume chunk {chunk_coords}.')
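For the 3-D recon array, recon_zarr.blocks[*chunk_coords] is shorthand for the start/end slice arithmetic the previous version spelled out; a small equivalence check with made-up sizes (same .blocks assumption as above):

import numpy as np
import zarr

vol_chunk_shape = (4, 5, 6)                              # hypothetical chunk shape
z = zarr.open_array('demo_recon.zarr', mode='w', dtype=np.float32,
                    shape=(8, 10, 12), chunks=vol_chunk_shape)
block = np.full(vol_chunk_shape, 7, dtype=np.float32)
chunk_coords = (1, 0, 1)
z.blocks[1, 0, 1] = block                                # chunk-aligned write
start = [s * c for s, c in zip(vol_chunk_shape, chunk_coords)]
end = [s * (c + 1) for s, c in zip(vol_chunk_shape, chunk_coords)]
assert np.array_equal(z[start[0]:end[0], start[1]:end[1], start[2]:end[2]], block)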
def reconstruct(self):
self.write_projection_chunks()
# compressor = zarr.codecs.Blosc(cname='zstd')
zarr.open_array(
store=self.recon_zarr_path,
@@ -207,25 +214,52 @@ class ChunkedCT:
for chunk_coords in itertools.product(*map(range, self.vol_chunks)):
self.reconstruct_volume_chunk(chunk_coords)
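reconstruct() visits every (Z, Y, X) chunk coordinate independently, re-reading all projection chunks for each one; with vol_chunks = (2, 2, 2) the loop order is:

import itertools

vol_chunks = (2, 2, 2)  # illustrative
print(list(itertools.product(*map(range, vol_chunks))))
# [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1),
#  (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)]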
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# parser.add_argument('-m', '--meta_path', type=str, required=True, help='Path to CT scan metadata file.')
# parser.add_argument('-t', '--tiff_dir', type=str, default=True, help='Path to directory of TIFF projections.')
parser.add_argument('-p', '--processes', type=int, default=None, help="Number of processes for multiprocessing. None uses all available.")
parser.add_argument('-c', '--chunk-size', type=int, default=100, help="Projection chunk size.")
parser.add_argument('-v', '--vol-chunks', type=str, default='1,1,1', help="Volume chunks specified as number of chunks in each dimension e.g. 2,2,2")
args = parser.parse_args()
vol_chunks = tuple(int(d) for d in args.vol_chunks.split(','))
@app.default
def main(
config_path: str=None,
tiff_dir: str=None,
output_dir: str='out',
angle_chunks: int=1,
vol_chunks: str='1,1,1',
processes: int=None,
verbose: int=None,
):
"""
Parameters
----------
config_path: str
Path to CT scan metadata file.
tiff_dir: str
Path to directory of TIFF projections. Defaults to the parent folder of config_path.
output_dir: str
Directory where everything is outputted.
angle_chunks: int
Number of chunks the projections are split into along the angle dimension.
vol_chunks: str
Number of volume chunks along each dimension, comma separated, e.g. 2,2,2.
processes: int
Number of processes for multiprocessing. None uses all available.
verbose: int
Whether center slices from the reconstructions are saved along the way to visualize progress. Defaults to 1.
"""
vol_chunks = tuple(int(d) for d in vol_chunks.split(','))
if len(vol_chunks) != 3:
raise ValueError(f'vol_chunks must have exactly 3 comma-separated values, got {vol_chunks!r}')
# Currently for ease of testing
if config_path is None:
config_path = '/dtu/3d-imaging-center/projects/2021_DANFIX_Casper/raw_data_3DIM/Casper_top_3_2 [2021-03-17 16.54.39]/Casper_top_3_2_recon.xtekct'
tiff_dir = Path(config_path).parent
processor = ChunkedCT(
config_path=config_path, tiff_dir=tiff_dir,
proj_chunk_size=args.chunk_size, vol_chunks=vol_chunks, processes=args.processes
config_path=config_path,
tiff_dir=tiff_dir,
output_dir=output_dir,
angle_chunks=angle_chunks,
vol_chunks=vol_chunks,
processes=processes,
)
processor.write_projection_chunks()
processor.reconstruct()
if __name__ == '__main__':
app()
\ No newline at end of file
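For reference, a sketch of driving the new entry point programmatically, assuming the ChunkedCT class above is in scope; every value below is a placeholder, and the cyclopts App exposes the same parameters on the command line as kebab-case options (e.g. --config-path, --angle-chunks, --vol-chunks):

# Hypothetical usage; paths, chunk counts and process count are placeholders.
processor = ChunkedCT(
    config_path='/path/to/scan.xtekct',
    tiff_dir=None,                      # falls back to the parent folder of config_path
    output_dir='out',
    angle_chunks=4,
    vol_chunks=(2, 2, 2),
    processes=8,
)
processor.write_projection_chunks()     # may be skipped if projections.zarr already exists
processor.reconstruct()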