Skip to content

Commit 1b5305b

Browse files
Authored by: giovannivolpe, BenjaminMidtvedt, github-actions[bot], JesusPinedaC, Henrik-KM
1.5.1 (#152)
* chore: autopublish 2022-07-26T13:54:44Z * Remove create-badges job * Delete test.py * Add multi-head masked attention * Update multi-head gated attention to match parent layer * Update documentation * Test multi-head masked attention * allow gated attention layers to use bias * test bias in gated attention layers * set return_attention_weights to False to avoid multi-outputs Use MultiHeadSelfAttention and MultiHeadGatedSelfAttention if you want to return the attention weights * reformat gnns/layers.py This commit adds new message-passing graph layers (MPN) and graph convolutional layers to dt, including vanilla MPN, GRUMPN, Masked-attention FGNN, and GraphTransformer. * Update layers.py * Update test_layers.py * Update models.py * Update test_models.py * Update test_models.py * Fix indexing problems related to tf.gather * Allow multi-inputs in ContinuousGenerator * Fix bad conversion to integer * version bump * Fix phase correction at focus and offset calculation * Fix phase correction in propagation * Fix mie phase out of focus * Fix mie phase out of focus * Update README.md * Bm/version 1.4.0 (#137) * Update layers.py * Update convolutional.py Transformer-based models can now be reused and expanded quickly and easily * Update documentation * Update Transformer-based models * Delete classifying_MNIST_vit_tutorial.ipynb * Create classifying_MNIST_vit_tutorial.ipynb * Update datasets.py * Allows kwargs as inputs in single_layer_call * Update embeddings.py * masked transformers * reformat transformer models * Create trajectory_analysis_tutorial.ipynb * Add Variational autoencoders * Add variational autoencoders * Update vae.py * Create MNIST_VAE_tutorial.ipynb * Update MNIST_VAE_tutorial.ipynb * Create folder for course examples * Update README.md * Update README.md * Update examples * Update README.md * Update README.md * Update MNIST VAE examples * Added MLP regression example * Update README.md * Create image_segmentation_Unet.ipynb * Update README.md * Documented 
and tested cell_counting_tutorial.ipynb * improve dnn example * Shift variant mie * Position mie scatterer correctly * implement set z * implement mnist v1 * implement z dependence * remove logging * Implement flattening methods * Implement pooling and resizing * Implement TensorflowDataset * Finalize MNIST * Implement Malaria classification * alpha0 release * fix batchsize in fit * implement dataset.take * Implement datasets * fix phase in mie * Fix mie positioning and focusing * Commit to new branch * add tensorflow datasets dependence * remove test Co-authored-by: Jesús Pineda <[email protected]> Co-authored-by: Jesús Pineda <[email protected]> Co-authored-by: Benjamin Midtvedt <[email protected]> Co-authored-by: Ccx55 <[email protected]> * Add tensorflow datasets to the list of dependencies. * Read requirements.txt into setup.py * remove sphinx from build * remove create badges * Create CITATION.cff * Create .zenodo.json * Update transformer models * Update pint_definition.py * Update requirements.txt * create TimeDistributed CNN * small fixes to lodestar * remove direct getter of properties * Update scatterers.py Coherence length fix for MieScatterer * Update scatterers.py Added coherence length to the conversion table * mie phase fix Co-authored-by: BenjaminMidtvedt <[email protected]> Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com> Co-authored-by: Jesús Pineda <[email protected]> Co-authored-by: Benjamin Midtvedt <[email protected]> Co-authored-by: Jesús Pineda <[email protected]> Co-authored-by: Ccx55 <[email protected]> Co-authored-by: Harshith Bachimanchi <[email protected]>
1 parent 8685af1 commit 1b5305b

File tree

2 files changed

+46
-22
lines changed

2 files changed

+46
-22
lines changed

deeptrack/optics.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -687,11 +687,11 @@ def get(self, illuminated_volume, limits, fields, **kwargs):
687687

688688
if not kwargs.get("return_field", False):
689689
output_image = np.square(np.abs(output_image))
690-
else:
691-
# Fudge factor. Not sure why this is needed.
692-
output_image = output_image - 1
693-
output_image = output_image * np.exp(1j * -np.pi / 4)
694-
output_image = output_image + 1
690+
# else:
691+
# Fudge factor. Not sure why this is needed.
692+
# output_image = output_image - 1
693+
# output_image = output_image * np.exp(1j * -np.pi / 4)
694+
# output_image = output_image + 1
695695

696696
output_image.properties = illuminated_volume.properties
697697

deeptrack/scatterers.py

Lines changed: 41 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -498,6 +498,9 @@ class MieScatterer(Scatterer):
498498
return_fft : bool
499499
If True, the feature returns the fft of the field, rather than the
500500
field itself.
501+
coherence_length : float
502+
The temporal coherence length of a partially coherent light given in meters. If None, the illumination is
503+
assumed to be coherent.
501504
"""
502505

503506
__gpu_compatible__ = True
@@ -508,6 +511,7 @@ class MieScatterer(Scatterer):
508511
collection_angle=(u.radian, u.radian),
509512
wavelength=(u.meter, u.meter),
510513
offset_z=(u.meter, u.meter),
514+
coherence_length=(u.meter, u.pixel),
511515
)
512516

513517
def __init__(
@@ -527,6 +531,7 @@ def __init__(
527531
working_distance=1000000, # large value to avoid numerical issues unless the user specifies a smaller value
528532
position_objective=(0, 0),
529533
return_fft=False,
534+
coherence_length=None,
530535
**kwargs,
531536
):
532537
if polarization_angle is not None:
@@ -555,6 +560,7 @@ def __init__(
555560
working_distance=working_distance,
556561
position_objective=position_objective,
557562
return_fft=return_fft,
563+
coherence_length=coherence_length,
558564
**kwargs,
559565
)
560566

@@ -574,20 +580,22 @@ def _process_properties(self, properties):
574580
)
575581

576582
if properties["offset_z"] == "auto":
583+
size = (
584+
np.array(properties["output_region"][2:])
585+
- properties["output_region"][:2]
586+
)
587+
xSize, ySize = size
588+
arr = pad_image_to_fft(np.zeros((xSize, ySize))).astype(complex)
589+
min_edge_size = np.min(arr.shape)
577590
properties["offset_z"] = (
578-
np.min(
579-
np.array(properties["output_region"][2:])
580-
- properties["output_region"][:2]
581-
)
582-
* 0.75
591+
min_edge_size
592+
* 0.45
583593
* min(properties["voxel_size"][:2])
584594
/ np.tan(properties["collection_angle"])
585595
)
586596
return properties
587597

588-
def get_xy_size(self):
589-
output_region = self.properties["output_region"]()
590-
padding = self.properties["padding"]()
598+
def get_xy_size(self, output_region, padding):
591599
return (
592600
output_region[2] - output_region[0] + padding[0] + padding[2],
593601
output_region[3] - output_region[1] + padding[1] + padding[3],
@@ -599,7 +607,7 @@ def get_XY(self, shape, voxel_size):
599607
return np.meshgrid(x * voxel_size[0], y * voxel_size[1], indexing="ij")
600608

601609
def get_detector_mask(self, X, Y, radius):
602-
return np.sqrt(X ** 2 + Y ** 2) < radius
610+
return np.sqrt(X**2 + Y**2) < radius
603611

604612
def get_plane_in_polar_coords(self, shape, voxel_size, plane_position):
605613

@@ -612,8 +620,8 @@ def get_plane_in_polar_coords(self, shape, voxel_size, plane_position):
612620
Y = Y + plane_position[1]
613621
Z = plane_position[2] # might be +z or -z
614622

615-
R2_squared = X ** 2 + Y ** 2
616-
R3 = np.sqrt(R2_squared + Z ** 2) # might be +z instead of -z
623+
R2_squared = X**2 + Y**2
624+
R3 = np.sqrt(R2_squared + Z**2) # might be +z instead of -z
617625

618626
# get the angles
619627
cos_theta = Z / R3
@@ -639,11 +647,13 @@ def get(
639647
working_distance,
640648
position_objective,
641649
return_fft,
650+
coherence_length,
651+
output_region,
642652
**kwargs,
643653
):
644654

645655
# Get size of the output
646-
xSize, ySize = self.get_xy_size()
656+
xSize, ySize = self.get_xy_size(output_region, padding)
647657
voxel_size = get_active_voxel_size()
648658
arr = pad_image_to_fft(np.zeros((xSize, ySize))).astype(complex)
649659
arr = image.maybe_cupy(arr)
@@ -672,11 +682,11 @@ def get(
672682
# x and y position of a beam passing through field evaluation plane on the objective
673683
x_farfield = (
674684
position[0]
675-
+ R3_field * np.sqrt(1 - cos_theta_field ** 2) * cos_phi_field / ratio
685+
+ R3_field * np.sqrt(1 - cos_theta_field**2) * cos_phi_field / ratio
676686
)
677687
y_farfield = (
678688
position[1]
679-
+ R3_field * np.sqrt(1 - cos_theta_field ** 2) * sin_phi_field / ratio
689+
+ R3_field * np.sqrt(1 - cos_theta_field**2) * sin_phi_field / ratio
680690
)
681691

682692
# if the beam is within the pupil
@@ -720,19 +730,33 @@ def get(
720730
S2 = sum([E[i] * B[i] * PI[i] + E[i] * A[i] * TAU[i] for i in range(0, L)])
721731

722732
arr[pupil_mask] = (
723-
1j
733+
-1j
724734
/ (k * R3_field)
725735
* np.exp(1j * k * R3_field)
726736
* (S2 * S2_coef + S1 * S1_coef)
727737
)
728738

739+
# For partially coherent illumination
740+
if coherence_length:
741+
sigma = z * np.sqrt((coherence_length / z + 1) ** 2 - 1)
742+
sigma = sigma * (offset_z / z)
743+
744+
mask = np.zeros_like(arr)
745+
y, x = np.ogrid[
746+
-mask.shape[0] // 2 : mask.shape[0] // 2,
747+
-mask.shape[1] // 2 : mask.shape[1] // 2,
748+
]
749+
mask = np.exp(-0.5 * (x**2 + y**2) / ((sigma) ** 2))
750+
751+
arr = arr * mask
752+
729753
fourier_field = np.fft.fft2(arr)
730754

731755
propagation_matrix = get_propagation_matrix(
732756
fourier_field.shape,
733757
pixel_size=voxel_size[2],
734-
wavelength=wavelength,
735-
to_z=(-offset_z - z) / refractive_index_medium,
758+
wavelength=wavelength / refractive_index_medium,
759+
to_z=(-offset_z - z),
736760
dy=(
737761
relative_position[0] * ratio
738762
+ position[0]

0 commit comments

Comments (0)