Shadow catcher? #1368
Shadow catcher?
#1368
-
Beta Was this translation helpful? Give feedback.
Answered by
shinyoung-yi
Oct 29, 2024
Replies: 1 comment 2 replies
-
# To the best of my knowledge, most research-oriented physically based ray
# tracing systems, including Mitsuba 3, do not support such a feature, since
# shadow-catcher objects are not physically realizable.
#
# My Ad-hoc Solution
# ------------------
# Prepare your scene.
import numpy as np
import matplotlib.pyplot as plt
import mitsuba as mi

# Prefer the CUDA variant; fall back to LLVM when no GPU is available.
mi.set_variant('cuda_ad_rgb', 'llvm_ad_rgb')
print(f"{mi.__version__ = }")  # mi.__version__ = '3.5.2'
def get_scene_dict() -> dict:
    """Build a Cornell-box scene dict reduced to the white wall and the light.

    Starting from ``mi.cornell_box()``, every colored surface is removed so
    that only the emitter (enlarged 1.8x) and the remaining white geometry
    stay, and the film is switched to RGBA so renders carry an alpha channel.

    Returns:
        A Mitsuba scene dictionary suitable for ``mi.load_dict``.
    """
    scene_dict = mi.cornell_box()
    # Strip the colored walls, floor and ceiling from the stock Cornell box.
    for key in ('red', 'green', 'floor', 'ceiling', 'green-wall', 'red-wall'):
        del scene_dict[key]
    # Enlarge the area light so it dominates the illumination.
    scene_dict['light']['to_world'] @= mi.ScalarTransform4f.scale([1.8, 1.8, 1.8])
    # RGBA film: the alpha channel is needed for compositing later on.
    scene_dict['sensor']['film']['pixel_format'] = 'rgba'
    return scene_dict
# You can change this variable to whatever you want.
scene_dict = get_scene_dict()
scene: mi.Scene = mi.load_dict(scene_dict)
# Baseline render of the scene itself (RGBA, thanks to the film setting).
img0 = mi.render(scene).numpy()
def imshow(img: np.ndarray, title=None):
    """Display an RGBA image next to a visualization of its alpha channel.

    The left panel shows the color, clipped to [0, 1] and gamma-encoded
    (exponent 1/2.2) for display; the right panel shows ``img[:, :, 3]``
    with a colorbar.

    Args:
        img: RGBA image array of shape [h, w, 4].
        title: Optional title for the color panel.
    """
    fig, (color_ax, alpha_ax) = plt.subplots(1, 2)
    color_ax.imshow(np.clip(img, 0, 1) ** (1 / 2.2))
    color_ax.set_axis_off()
    if title is not None:
        color_ax.set_title(title)
    mappable = alpha_ax.imshow(img[:, :, 3])
    alpha_ax.set_axis_off()
    alpha_ax.set_title("Alpha")
    plt.colorbar(mappable, shrink=0.5)
imshow(img0, title='img0')

# Extract emitter configuration -- this step is somewhat manual.
sensor: mi.Sensor = scene.sensors()[0]
integrator: mi.Integrator = scene.integrator()

# I have no idea how to generalize this: the names (keys) of the plugins
# related to the emitters have to be kept track of manually.
emitters_dict = {'type': 'scene'}
for key in ('white', 'light'):
    emitters_dict[key] = scene_dict[key]
img_emitter_only = mi.render(mi.load_dict(emitters_dict),
                             integrator=integrator, sensor=sensor,
                             spp=64).numpy()
imshow(img_emitter_only, title='img_emitter_only')

# Prepare your catcher object.
catcher_dict = {
    'type': 'scene',
    'catcher': {
        'type': 'rectangle',
        # Any pose `to_world` you want.
        'to_world': mi.ScalarTransform4f.translate([0, -1, 0]).rotate([1, 0, 0], -90),
        'bsdf': {
            'type': 'diffuse',
            'reflectance': {'type': 'rgb', 'value': [1, 1, 1]},
        },
    },
}

# Pass 1: emitters + bare catcher, direct illumination only.
emitters_catcher_dict = emitters_dict.copy()
emitters_catcher_dict.update(catcher_dict)
integ_direct = mi.load_dict({'type': 'direct'})
img_catcher = mi.render(mi.load_dict(emitters_catcher_dict),
                        integrator=integ_direct, sensor=sensor, spp=64).numpy()
imshow(img_catcher, title='img_catcher')

# Pass 2: full scene + catcher, so the catcher receives the scene's shadows.
scene_catcher_dict = scene_dict.copy()
scene_catcher_dict.update(catcher_dict)
img_scene_catcher = mi.render(mi.load_dict(scene_catcher_dict),
                              integrator=integ_direct, sensor=sensor, spp=64).numpy()
imshow(img_scene_catcher, title='img_scene_catcher')

# Compute a layer consisting only of the shadow on the catcher.
def get_gray(img: np.ndarray) -> np.ndarray:
    """Return the luma of an RGB(A) image using BT.601 weights (channels 0-2)."""
    return img[..., 0] * 0.299 + img[..., 1] * 0.587 + img[..., 2] * 0.114
# Pixels where the catcher is actually visible (non-zero alpha).
mask = img_catcher[:, :, 3] > 0
# Ratio of shadowed to unshadowed brightness on those pixels: 1 means no
# occlusion by the scene, values below 1 mean the scene casts a shadow there.
shadow_2d = get_gray(img_scene_catcher[mask]) / get_gray(img_catcher[mask])
# Shadow layer: fully transparent black everywhere, except on the catcher
# where alpha encodes how much light the scene blocks.
img_shadow = np.zeros(img_catcher.shape)
img_shadow[mask, 3] = np.clip(1 - shadow_2d, 0, 1)
imshow(img_shadow, title='img_shadow')

# Final step: alpha composition.
def alpha_compose(top_layer: np.ndarray, bottom_layer: np.ndarray):
    """Composite ``top_layer`` over ``bottom_layer`` (straight-alpha "over").

    Args:
        top_layer: RGBA array of shape [h, w, 4] with straight alpha.
        bottom_layer: RGBA array of the same shape.

    Returns:
        The composited RGBA array. Where the combined alpha is zero the
        color is left black instead of dividing by zero.
    """
    a_top = top_layer[..., 3:]      # [h, w, 1]
    a_bot = bottom_layer[..., 3:]   # [h, w, 1]
    rgb_top = top_layer[..., :3]    # [h, w, 3]
    rgb_bot = bottom_layer[..., :3]
    a_out = a_top + a_bot * (1 - a_top)
    # Premultiplied "over" sum, then un-premultiply where alpha is non-zero.
    rgb_out = rgb_top * a_top + rgb_bot * a_bot * (1 - a_top)
    np.divide(rgb_out, a_out, out=rgb_out, where=(a_out != 0))
    return np.concatenate([rgb_out, a_out], -1)
img_final = alpha_compose(img0, img_shadow)
imshow(img_final, title='img_final')

# Check that it composes well with another image: a gray checkerboard.
img_check = np.ones(img_final.shape)
I, J = np.indices(img_check.shape[:2])
img_check[((I // 5) % 2 == 0) ^ ((J // 5) % 2 == 0), :3] = 0.2
imshow(alpha_compose(img_final, img_check), title="alpha compose with a texture")
Beta Was this translation helpful? Give feedback.
2 replies
Answer selected by
StarsTesla
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
To the best of my knowledge, most research-oriented physically based ray tracing systems, including Mitsuba 3, do not support such a feature, since shadow-catcher objects are not physically realizable.
However, there is an ad-hoc way to achieve this, although it is not simple.
My Ad-hoc Solution
Prepare your scene