From 26683f6c3d6d7b99547a1600e9def9b3efb19e60 Mon Sep 17 00:00:00 2001
From: Vincent Leroy
Date: Mon, 25 May 2020 13:20:35 +0200
Subject: [PATCH] Distant plugin test fixes

---
 src/sensors/distant.cpp           |  26 ++----
 src/sensors/tests/test_distant.py | 136 +++++++++++++++----------------
 2 files changed, 78 insertions(+), 84 deletions(-)

diff --git a/src/sensors/distant.cpp b/src/sensors/distant.cpp
index 0a71ae40b..8438d131f 100644
--- a/src/sensors/distant.cpp
+++ b/src/sensors/distant.cpp
@@ -40,7 +40,7 @@ section of the scene's bounding sphere.
 
 MTS_VARIANT class DistantSensor final : public Sensor<Float, Spectrum> {
 public:
-    MTS_IMPORT_BASE(Sensor, m_world_transform, m_needs_sample_3, m_film)
+    MTS_IMPORT_BASE(Sensor, m_world_transform, m_film)
     MTS_IMPORT_TYPES(Scene)
 
     DistantSensor(const Properties &props) : Base(props) {
@@ -76,8 +76,6 @@ MTS_VARIANT class DistantSensor final : public Sensor<Float, Spectrum> {
             0.5f + math::RayEpsilon<Float>)
             Log(Warn, "This sensor should be used with a reconstruction filter "
                       "with a radius of 0.5 or lower (e.g. default box)");
-
-        m_needs_sample_3 = false;
     }
 
     void set_scene(const Scene *scene) override {
@@ -88,8 +86,8 @@ MTS_VARIANT class DistantSensor final : public Sensor<Float, Spectrum> {
     }
 
     std::pair<Ray3f, Spectrum> sample_ray(Float time, Float wavelength_sample,
-                                          const Point2f &spatial_sample,
-                                          const Point2f & /*direction_sample*/,
+                                          const Point2f & /*film_sample*/,
+                                          const Point2f &aperture_sample,
                                           Mask active) const override {
         MTS_MASKED_FUNCTION(ProfilerPhase::EndpointSampleRay, active);
         Ray3f ray;
@@ -109,7 +107,7 @@ MTS_VARIANT class DistantSensor final : public Sensor<Float, Spectrum> {
             // If no target point is defined, sample a target point on the
             // bounding sphere's cross section
             Point2f offset =
-                warp::square_to_uniform_disk_concentric(spatial_sample);
+                warp::square_to_uniform_disk_concentric(aperture_sample);
             Vector3f perp_offset = trafo.transform_affine(
                 Vector3f{ offset.x(), offset.y(), 0.f });
             ray.o = m_bsphere.center + (perp_offset - ray.d) * m_bsphere.radius;
@@ -118,15 +116,12 @@ MTS_VARIANT class DistantSensor final : public Sensor<Float, Spectrum> {
     }
 
        ray.update();
-        return std::make_pair(
-            ray, m_has_target
-                     ? wav_weight
-                     : wav_weight * (math::Pi * sqr(m_bsphere.radius)));
+        return std::make_pair(ray, wav_weight);
     }
 
     std::pair<RayDifferential3f, Spectrum> sample_ray_differential(
-        Float time, Float wavelength_sample, const Point2f &spatial_sample,
-        const Point2f & /*direction_sample*/, Mask active) const override {
+        Float time, Float wavelength_sample, const Point2f & /*film_sample*/,
+        const Point2f &aperture_sample, Mask active) const override {
         MTS_MASKED_FUNCTION(ProfilerPhase::EndpointSampleRay, active);
         RayDifferential3f ray;
         ray.time = time;
@@ -145,7 +140,7 @@ MTS_VARIANT class DistantSensor final : public Sensor<Float, Spectrum> {
            // If no target point is defined, sample a target point on the
            // bounding sphere's cross section
            Point2f offset =
-                warp::square_to_uniform_disk_concentric(spatial_sample);
+                warp::square_to_uniform_disk_concentric(aperture_sample);
            Vector3f perp_offset = trafo.transform_affine(
                Vector3f{ offset.x(), offset.y(), 0.f });
            ray.o = m_bsphere.center + (perp_offset - ray.d) * m_bsphere.radius;
@@ -158,10 +153,7 @@ MTS_VARIANT class DistantSensor final : public Sensor<Float, Spectrum> {
        ray.has_differentials = false;
        ray.update();
 
-        return std::make_pair(
-            ray, m_has_target
-                     ? wav_weight
-                     : wav_weight * (math::Pi * sqr(m_bsphere.radius)));
+        return std::make_pair(ray, wav_weight);
     }
 
     /// This sensor does not occupy any particular region of space, return an
diff --git a/src/sensors/tests/test_distant.py b/src/sensors/tests/test_distant.py
index 4329dea10..b166c26bb 100644
--- a/src/sensors/tests/test_distant.py
+++ b/src/sensors/tests/test_distant.py
@@ -5,49 +5,47 @@
 import mitsuba
 
 
-def xml_sensor(direction=None, target=None, xpixel=1):
+def dict_sensor(direction=None, target=None, fwidth=1):
     if direction is None:
-        xml_direction = ""
+        dict_direction = {}
     else:
-        if type(direction) is not str:
-            direction = ",".join([str(x) for x in direction])
-        xml_direction = f"""<vector name="direction" value="{direction}"/>"""
+        dict_direction = {"direction": direction}
 
     if target is None:
-        xml_target = ""
+        dict_target = {}
     else:
-        if type(target) is not str:
-            target = ",".join([str(x) for x in target])
-        xml_target = f"""<point name="target" value="{target}"/>"""
+        dict_target = {"target": target}
 
-    xml_film = f"""<film type="hdrfilm">
-                       <integer name="width" value="{xpixel}"/>
-                       <integer name="height" value="1"/>
-                       <rfilter type="box"/>
-                   </film>"""
+    dict_film = {
+        "type": "hdrfilm",
+        "width": fwidth,
+        "height": 1,
+        "rfilter": {"type": "box"}
+    }
 
-    return f"""<sensor version="2.0.0" type="distant">
-                   {xml_direction}
-                   {xml_target}
-                   {xml_film}
-               </sensor>"""
+    return {
+        "type": "distant",
+        **dict_direction,
+        **dict_target,
+        "film": dict_film,
+    }
 
 
-def make_sensor(direction=None, target=None, xpixel=1):
-    from mitsuba.core.xml import load_string
-    return load_string(xml_sensor(direction, target, xpixel))
+def make_sensor(direction=None, target=None, fwidth=1):
+    from mitsuba.core.xml import load_dict
+    return load_dict(dict_sensor(direction, target, fwidth))
 
 
 def test_construct(variant_scalar_rgb):
-    from mitsuba.core.xml import load_string
+    from mitsuba.core.xml import load_string, load_dict
 
     # Construct without parameters (wrong film size)
     with pytest.raises(RuntimeError):
-        sensor = load_string("""<sensor version="2.0.0" type="distant"/>""")
+        sensor = load_dict({"type": "distant"})
 
     # Construct with wrong film size
     with pytest.raises(RuntimeError):
-        sensor = make_sensor(xpixel=2)
+        sensor = make_sensor(fwidth=2)
 
     # Construct with minimal parameters
     sensor = make_sensor()
@@ -110,16 +108,15 @@ def test_sample_ray(variant_scalar_rgb, direction, target, ray_kind):
 
 
 def make_scene(direction=[0, 0, -1], target=None):
-    from mitsuba.core.xml import load_string
+    from mitsuba.core.xml import load_dict
 
-    scene_xml = f"""
-        <scene version="2.0.0">
-            {xml_sensor(direction, target)}
-            <shape type="rectangle"/>
-        </scene>
-    """
+    dict_scene = {
+        "type": "scene",
+        "sensor": dict_sensor(direction, target),
+        "surface": {"type": "rectangle"}
+    }
 
-    return load_string(scene_xml)
+    return load_dict(dict_scene)
 
 
 @pytest.mark.parametrize("target", [[0, 0, 0], [0.5, 0, 1]])
@@ -143,12 +140,12 @@ def test_target(variant_scalar_rgb, target):
 @pytest.mark.parametrize("direction", [[0, 0, -1], [0.5, 0.5, -1]])
 def test_intersection(variant_scalar_rgb, direction):
     # Check if the sensor correctly casts rays spread uniformly in the scene
-    direction = ek.normalize(direction)
+    direction = list(ek.normalize(direction))
     scene = make_scene(direction=direction)
     sensor = scene.sensors()[0]
     sampler = sensor.sampler()
 
-    n_rays = 10000
+    n_rays = 1000
     isect = np.empty((n_rays, 3))
 
     for i in range(n_rays):
@@ -168,7 +165,7 @@ def test_intersection(variant_scalar_rgb, direction):
     # Average intersection locations should be (in average) centered
     # around (0, 0, 0)
     isect_valid = isect[~np.isnan(isect).all(axis=1)]
-    assert np.allclose(isect_valid[:, :2].mean(axis=0), 0., atol=1e-2)
+    assert np.allclose(isect_valid[:, :2].mean(axis=0), 0., atol=5e-2)
     assert np.allclose(isect_valid[:, 2], 0., atol=1e-5)
 
     # Check number of invalid intersections
@@ -177,42 +174,47 @@ def test_intersection(variant_scalar_rgb, direction):
     # slanting factor (cos theta) w.r.t the sensor's direction
     n_invalid = np.count_nonzero(np.isnan(isect).all(axis=1))
     assert np.allclose(n_invalid / n_rays, 1. - 2. / np.pi *
-                       ek.dot(direction, [0, 0, -1]), atol=1e-2)
+                       ek.dot(direction, [0, 0, -1]), atol=0.1)
 
 
 @pytest.mark.parametrize("radiance", [10**x for x in range(-3, 4)])
 def test_render(variant_scalar_rgb, radiance):
     # Test render results with a simple scene
-    from mitsuba.core.xml import load_string
-    import numpy as np
-
-    scene_xml = """
-        <scene version="2.0.0">
-            <shape type="rectangle">
-                <bsdf type="conductor"/>
-            </shape>
-
-            <integrator type="path"/>
-
-            <sensor type="distant">
-                <film type="hdrfilm">
-                    <integer name="width" value="1"/>
-                    <integer name="height" value="1"/>
-                    <string name="pixel_format" value="rgb"/>
-                    <rfilter type="box"/>
-                </film>
-                <sampler type="independent">
-                    <integer name="sample_count" value="$spp"/>
-                </sampler>
-            </sensor>
-
-            <emitter type="constant">
-                <spectrum name="radiance" value="$radiance"/>
-            </emitter>
-        </scene>
-    """
-
-    scene = load_string(scene_xml, spp=1, radiance=radiance)
+    from mitsuba.core.xml import load_dict
+    from mitsuba.core import Bitmap, Struct
+
+    def dict_scene(radiance=1.0, spp=1):
+        return {
+            "type": "scene",
+            "shape": {
+                "type": "rectangle",
+                "bsdf": {"type": "conductor"},
+            },
+            "integrator": {"type": "path"},
+            "sensor": {
+                "type": "distant",
+                "film": {
+                    "type": "hdrfilm",
+                    "width": 1,
+                    "height": 1,
+                    "pixel_format": "rgb",
+                    "rfilter": {"type": "box"},
+                },
+                "sampler": {
+                    "type": "independent",
+                    "sample_count": spp
+                },
+            },
+            "emitter": {
+                "type": "constant",
+                "radiance": {
+                    "type": "spectrum",
+                    "value": radiance
+                }
+            }
+        }
+
+    scene = load_dict(dict_scene(spp=1, radiance=radiance))
     sensor = scene.sensors()[0]
     scene.integrator().render(scene, sensor)
     img = sensor.film().bitmap()