Skip to content

Configuration

This page contains class defined in the hkw.setup module.

Config

Config dataclass

Configuration for rendering.

Attributes:

Name Type Description
sensor Sensor

Sensor settings.

film Film

Film settings.

sampler Sampler

Sampler settings.

emitters list[Emitter]

Emitter settings.

integrator Integrator

Integrator settings.

render_passes set[str]

Set of active render passes. Recognised values:

  • "albedo" – diffuse color without shading.
  • "depth" – depth buffer.
  • "normal" – shading-normal pass.
  • "facet_id" – per-face index encoded as RGB (Blender only).

The convenience properties :attr:albedo, :attr:depth, :attr:normal, and :attr:facet_id are thin aliases that add or remove the corresponding string from this set.

Source code in hakowan/setup/config.py
@dataclass(kw_only=True, slots=True)
class Config:
    """Configuration for rendering.

    Attributes:
        sensor: Sensor settings.
        film: Film settings.
        sampler: Sampler settings.
        emitters: Emitter settings.
        integrator: Integrator settings.
        render_passes: Set of active render passes.  Recognised values:

            - ``"albedo"``    – diffuse color without shading.
            - ``"depth"``     – depth buffer.
            - ``"normal"``    – shading-normal pass.
            - ``"facet_id"``  – per-face index encoded as RGB (Blender only).

            The convenience properties :attr:`albedo`, :attr:`depth`,
            :attr:`normal`, and :attr:`facet_id` are thin aliases that add or
            remove the corresponding string from this set.
    """

    sensor: Sensor = field(default_factory=Perspective)
    film: Film = field(default_factory=Film)
    sampler: Sampler = field(default_factory=Independent)
    emitters: list[Emitter] = field(default_factory=lambda: [Envmap()])
    integrator: Integrator = field(default_factory=Path)
    _render_passes: set[str] = field(default_factory=set)

    def z_up(self):
        """Update configuration for z-up coordinate system."""
        self.sensor.location = np.array([0, -5, 0])
        self.sensor.up = np.array([0, 0, 1])
        for emitter in self.emitters:
            if isinstance(emitter, Envmap):
                emitter.up = np.array([0, 0, 1])
                emitter.rotation = 180.0

    def z_down(self):
        """Update configuration for z-down coordinate system."""
        self.sensor.location = np.array([0, 5, 0])
        self.sensor.up = np.array([0, 0, -1])
        for emitter in self.emitters:
            if isinstance(emitter, Envmap):
                emitter.up = np.array([0, 0, -1])
                emitter.rotation = 180.0

    def y_up(self):
        """Update configuration for y-up coordinate system."""
        self.sensor.location = np.array([0, 0, 5])
        self.sensor.up = np.array([0, 1, 0])
        for emitter in self.emitters:
            if isinstance(emitter, Envmap):
                emitter.up = np.array([0, 1, 0])
                emitter.rotation = 180.0

    def y_down(self):
        """Update configuration for y-down coordinate system."""
        self.sensor.location = np.array([0, 0, -5])
        self.sensor.up = np.array([0, -1, 0])
        for emitter in self.emitters:
            if isinstance(emitter, Envmap):
                emitter.up = np.array([0, -1, 0])
                emitter.rotation = 180.0

    # ------------------------------------------------------------------ #
    # render_passes – primary interface                                    #
    # ------------------------------------------------------------------ #

    @property
    def render_passes(self) -> set[str]:
        """Set of active render passes.

        Valid pass names are ``"albedo"``, ``"depth"``, ``"normal"``, and
        ``"facet_id"``.  Assigning a new collection replaces the entire set
        and re-synchronises the Mitsuba AOV integrator accordingly.

        Example::

            config.render_passes = {"albedo", "depth"}
        """
        return self._render_passes

    @render_passes.setter
    def render_passes(self, value: set[str] | list[str]):
        """Replace the active render-pass set and re-sync AOV integrator."""
        self._render_passes = set(value)
        self.__sync_aovs()

    # ------------------------------------------------------------------ #
    # Convenience boolean aliases                                          #
    # ------------------------------------------------------------------ #

    @property
    def albedo(self) -> bool:
        """Whether the albedo pass is active.  Alias for ``"albedo" in render_passes``."""
        return "albedo" in self._render_passes

    @albedo.setter
    def albedo(self, value: bool):
        """Add or remove the albedo pass.  Also updates the Mitsuba AOV integrator."""
        if value:
            self._render_passes.add("albedo")
        else:
            self._render_passes.discard("albedo")
        self.__sync_aovs()

    @property
    def depth(self) -> bool:
        """Whether the depth pass is active.  Alias for ``"depth" in render_passes``."""
        return "depth" in self._render_passes

    @depth.setter
    def depth(self, value: bool):
        """Add or remove the depth pass.  Also updates the Mitsuba AOV integrator."""
        if value:
            self._render_passes.add("depth")
        else:
            self._render_passes.discard("depth")
        self.__sync_aovs()

    @property
    def normal(self) -> bool:
        """Whether the shading-normal pass is active.  Alias for ``"normal" in render_passes``."""
        return "normal" in self._render_passes

    @normal.setter
    def normal(self, value: bool):
        """Add or remove the normal pass.  Also updates the Mitsuba AOV integrator."""
        if value:
            self._render_passes.add("normal")
        else:
            self._render_passes.discard("normal")
        self.__sync_aovs()

    @property
    def facet_id(self) -> bool:
        """Whether the facet-ID pass is active.  Alias for ``"facet_id" in render_passes``.

        When active the Blender backend performs a second render after the
        main one.  Every mesh face is colored with the RGB encoding of its
        zero-based index (R = high byte, G = mid byte, B = low byte) using a
        flat Emission shader so lighting has no effect.  The output is written
        to ``<stem>_facet_id<ext>`` with gamma correction, temporal blending,
        and pixel filtering all disabled so pixel values can be decoded
        directly::

            fid = (R << 16) | (G << 8) | B

        Background pixels have ``A = 0`` and can be masked out.  Supports up
        to 2**24 − 1 ≈ 16.7 M faces.
        """
        return "facet_id" in self._render_passes

    @facet_id.setter
    def facet_id(self, value: bool):
        """Add or remove the facet-ID pass."""
        if value:
            self._render_passes.add("facet_id")
        else:
            self._render_passes.discard("facet_id")

    # ------------------------------------------------------------------ #
    # Internal helpers                                                     #
    # ------------------------------------------------------------------ #

    def __sync_aovs(self):
        """Rebuild the Mitsuba AOV integrator from the current render-pass set.

        Strips any existing AOV wrapper and re-adds only the passes that are
        currently active, ensuring the integrator always reflects the exact
        state of ``_render_passes``.
        """
        # Strip the AOV wrapper (if any) to start from the base integrator.
        if isinstance(self.integrator, AOV):
            self.integrator = self.integrator.integrator or Path()

        # Re-add AOVs for every active pass that has a Mitsuba counterpart.
        _pass_to_aov = {
            "albedo": "albedo:albedo",
            "depth": "depth:depth",
            "normal": "sh_normal:sh_normal",
        }
        for pass_name, aov_str in _pass_to_aov.items():
            if pass_name in self._render_passes:
                if not isinstance(self.integrator, AOV):
                    self.integrator = AOV(aovs=[aov_str], integrator=self.integrator)
                elif aov_str not in self.integrator.aovs:
                    self.integrator.aovs.append(aov_str)

albedo property writable

Whether the albedo pass is active. Alias for "albedo" in render_passes.

depth property writable

Whether the depth pass is active. Alias for "depth" in render_passes.

facet_id property writable

Whether the facet-ID pass is active. Alias for "facet_id" in render_passes.

When active the Blender backend performs a second render after the main one. Every mesh face is colored with the RGB encoding of its zero-based index (R = high byte, G = mid byte, B = low byte) using a flat Emission shader so lighting has no effect. The output is written to <stem>_facet_id<ext> with gamma correction, temporal blending, and pixel filtering all disabled so pixel values can be decoded directly::

fid = (R << 16) | (G << 8) | B

Background pixels have A = 0 and can be masked out. Supports up to 2**24 − 1 ≈ 16.7 M faces.

normal property writable

Whether the shading-normal pass is active. Alias for "normal" in render_passes.

render_passes property writable

Set of active render passes.

Valid pass names are "albedo", "depth", "normal", and "facet_id". Assigning a new collection replaces the entire set and re-synchronises the Mitsuba AOV integrator accordingly.

Example::

config.render_passes = {"albedo", "depth"}

__sync_aovs()

Rebuild the Mitsuba AOV integrator from the current render-pass set.

Strips any existing AOV wrapper and re-adds only the passes that are currently active, ensuring the integrator always reflects the exact state of _render_passes.

Source code in hakowan/setup/config.py
def __sync_aovs(self):
    """Rebuild the Mitsuba AOV integrator from the current render-pass set.

    Strips any existing AOV wrapper and re-adds only the passes that are
    currently active, ensuring the integrator always reflects the exact
    state of ``_render_passes``.
    """
    # Strip the AOV wrapper (if any) to start from the base integrator.
    if isinstance(self.integrator, AOV):
        self.integrator = self.integrator.integrator or Path()

    # Re-add AOVs for every active pass that has a Mitsuba counterpart.
    _pass_to_aov = {
        "albedo": "albedo:albedo",
        "depth": "depth:depth",
        "normal": "sh_normal:sh_normal",
    }
    for pass_name, aov_str in _pass_to_aov.items():
        if pass_name in self._render_passes:
            if not isinstance(self.integrator, AOV):
                self.integrator = AOV(aovs=[aov_str], integrator=self.integrator)
            elif aov_str not in self.integrator.aovs:
                self.integrator.aovs.append(aov_str)

y_down()

Update configuration for y-down coordinate system.

Source code in hakowan/setup/config.py
def y_down(self):
    """Update configuration for y-down coordinate system."""
    self.sensor.location = np.array([0, 0, -5])
    self.sensor.up = np.array([0, -1, 0])
    for emitter in self.emitters:
        if isinstance(emitter, Envmap):
            emitter.up = np.array([0, -1, 0])
            emitter.rotation = 180.0

y_up()

Update configuration for y-up coordinate system.

Source code in hakowan/setup/config.py
def y_up(self):
    """Update configuration for y-up coordinate system."""
    self.sensor.location = np.array([0, 0, 5])
    self.sensor.up = np.array([0, 1, 0])
    for emitter in self.emitters:
        if isinstance(emitter, Envmap):
            emitter.up = np.array([0, 1, 0])
            emitter.rotation = 180.0

z_down()

Update configuration for z-down coordinate system.

Source code in hakowan/setup/config.py
def z_down(self):
    """Update configuration for z-down coordinate system."""
    self.sensor.location = np.array([0, 5, 0])
    self.sensor.up = np.array([0, 0, -1])
    for emitter in self.emitters:
        if isinstance(emitter, Envmap):
            emitter.up = np.array([0, 0, -1])
            emitter.rotation = 180.0

z_up()

Update configuration for z-up coordinate system.

Source code in hakowan/setup/config.py
def z_up(self):
    """Update configuration for z-up coordinate system."""
    self.sensor.location = np.array([0, -5, 0])
    self.sensor.up = np.array([0, 0, 1])
    for emitter in self.emitters:
        if isinstance(emitter, Envmap):
            emitter.up = np.array([0, 0, 1])
            emitter.rotation = 180.0

Emitter

Emitter dataclass

Emitter dataclass contains lighting-related settings.

Source code in hakowan/setup/emitter.py
@dataclass(kw_only=True, slots=True)
class Emitter:
    """Emitter dataclass contains lighting-related settings."""

    pass

Envmap dataclass

Bases: Emitter

Environment light (i.e. image-based lighting).

Attributes:

Name Type Description
filename Path

Path to the environment light image file.

scale float

Scaling factor to be applied to the environment light.

up list

Up vector of the environment light.

rotation float

Rotation angle of the environment light around the up direction.

Source code in hakowan/setup/emitter.py
@dataclass(kw_only=True, slots=True)
class Envmap(Emitter):
    """Environment light (i.e. image-based lighting).

    Attributes:
        filename: Path to the environment light image file.
        scale: Scaling factor to be applied to the environment light.
        up: Up vector of the environment light.
        rotation: Rotation angle of the environment light around the up direction.
    """

    filename: Path = field(
        default_factory=lambda: Path(__file__).parents[1] / "envmaps" / "museum.exr"
    )
    scale: float = 1.0
    up: list = field(default_factory=lambda: [0, 1, 0])
    rotation: float = 180.0

Point dataclass

Bases: Emitter

Point light source.

Attributes:

Name Type Description
intensity Color | float

Light intensity.

position list[float]

Light position.

Source code in hakowan/setup/emitter.py
@dataclass(kw_only=True, slots=True)
class Point(Emitter):
    """Point light source.

    Attributes:
        intensity: Light intensity.
        position: Light position.
    """

    intensity: Color | float
    position: list[float]

Film

Film dataclass

Film dataclass stores specifications of the output image.

Attributes:

Name Type Description
width int

Width of the output image in pixels.

height int

Height of the output image in pixels.

file_format str

File format of the output image.

pixel_format str

Pixel format of the output image.

component_format str

Component format of the output image.

crop_offset NDArray | None

Offset of the crop window in pixels.

crop_size NDArray | None

Size of the crop window in pixels.

Together, width and height specify the output image resolution. crop_offset and crop_size defines a crop region. If either is None, no cropping is performed. file_format, pixel_format and component_format are for advanced user only. The default values should work in most cases.

Source code in hakowan/setup/film.py
@dataclass(kw_only=True, slots=True)
class Film:
    """Film dataclass stores specifications of the output image.

    Attributes:
        width: Width of the output image in pixels.
        height: Height of the output image in pixels.
        file_format: File format of the output image.
        pixel_format: Pixel format of the output image.
        component_format: Component format of the output image.
        crop_offset: Offset of the crop window in pixels.
        crop_size: Size of the crop window in pixels.

    Together, `width` and `height` specify the output image resolution.
    `crop_offset` and `crop_size` defines a crop region. If either is `None`, no cropping is performed.
    `file_format`, `pixel_format` and `component_format` are for advanced user only. The default
    values should work in most cases.
    """

    width: int = 1024
    height: int = 800
    file_format: str = "openexr"
    pixel_format: str = "rgba"
    component_format: str = "float16"
    crop_offset: npt.NDArray | None = None
    crop_size: npt.NDArray | None = None

Integrator

AOV dataclass

Bases: Integrator

Arbitrary output variable (AOV) integrator.

Attributes:

Name Type Description
aovs list[str]

List of AOVs to render.

integrator Integrator | None

Integrator to use for rendering AOVs.

Note

See Mitsuba doc for supported AOV types and other details.

Source code in hakowan/setup/integrator.py
@dataclass(kw_only=True, slots=True)
class AOV(Integrator):
    """Arbitrary output variable (AOV) integrator.

    Attributes:
        aovs: List of AOVs to render.
        integrator: Integrator to use for rendering AOVs.

    Note:
        See
        [Mitsuba
        doc](https://mitsuba.readthedocs.io/en/stable/src/generated/plugins_integrators.html#arbitrary-output-variables-integrator-aov)
        for supported AOV types and other details.
    """

    aovs: list[str]
    integrator: Integrator | None = None

Direct dataclass

Bases: Integrator

Direct integrator.

Attributes:

Name Type Description
shading_samples int | None

Number of shading samples.

emitter_samples int | None

Number of emitter samples.

bsdf_samples int | None

Number of BSDF samples.

Note

See Mitsuba doc for more details.

Source code in hakowan/setup/integrator.py
@dataclass(kw_only=True, slots=True)
class Direct(Integrator):
    """Direct integrator.

    Attributes:
        shading_samples: Number of shading samples.
        emitter_samples: Number of emitter samples.
        bsdf_samples: Number of BSDF samples.

    Note:
        See
        [Mitsuba doc](https://mitsuba.readthedocs.io/en/stable/src/generated/plugins_integrators.html#direct-illumination-integrator-direct)
        for more details.
    """

    shading_samples: int | None = None
    emitter_samples: int | None = None
    bsdf_samples: int | None = None

Integrator dataclass

Integrator dataclass contains parameters of various rendering techniques.

Attributes:

Name Type Description
hide_emitters bool

Whether to hide emitters from the camera.

Source code in hakowan/setup/integrator.py
@dataclass(kw_only=True, slots=True)
class Integrator:
    """Integrator dataclass contains parameters of various rendering techniques.

    Attributes:
        hide_emitters: Whether to hide emitters from the camera.
    """

    hide_emitters: bool = True

Path dataclass

Bases: Integrator

Path integrator.

Attributes:

Name Type Description
max_depth int

Maximum path depth. (-1 for unlimited)

rr_depth int

Depth at which Russian roulette starts.

Note

This integrator should work well for most surface-based scenes. See Mitsuba doc for more details.

Source code in hakowan/setup/integrator.py
@dataclass(kw_only=True, slots=True)
class Path(Integrator):
    """Path integrator.

    Attributes:
        max_depth: Maximum path depth. (-1 for unlimited)
        rr_depth: Depth at which Russian roulette starts.

    Note:
        This integrator should work well for most surface-based scenes.
        See
        [Mitsuba
        doc](https://mitsuba.readthedocs.io/en/stable/src/generated/plugins_integrators.html#path-tracer-path)
        for more details.
    """

    max_depth: int = -1
    rr_depth: int = 5

VolPath dataclass

Bases: Integrator

Volumetric path integrator.

Attributes:

Name Type Description
max_depth int

Maximum path depth. (-1 for unlimited)

rr_depth int

Depth at which Russian roulette starts.

Note

This integrator should work well for most volume-based scenes. For example, if dielectric material is involved, VolPath integrator sometimes produces better results than Path integrator.

See Mitsuba doc for more details.

Source code in hakowan/setup/integrator.py
@dataclass(kw_only=True, slots=True)
class VolPath(Integrator):
    """Volumetric path integrator.

    Attributes:
        max_depth: Maximum path depth. (-1 for unlimited)
        rr_depth: Depth at which Russian roulette starts.

    Note:
        This integrator should work well for most volume-based scenes. For example, if dielectric
        material is involved, `VolPath` integrator sometimes produces better results than `Path`
        integrator.

        See
        [Mitsuba
        doc](https://mitsuba.readthedocs.io/en/stable/src/generated/plugins_integrators.html#volumetric-path-tracer-volpath)
        for more details.
    """

    max_depth: int = -1
    rr_depth: int = 5

VolPathMIS dataclass

Bases: Integrator

Volumetric path integrator with spectral MIS.

Attributes:

Name Type Description
max_depth int

Maximum path depth. (-1 for unlimited)

rr_depth int

Depth at which Russian roulette starts.

Note

See Mitsuba doc for more details.

Source code in hakowan/setup/integrator.py
@dataclass(kw_only=True, slots=True)
class VolPathMIS(Integrator):
    """Volumetric path integrator with spectral MIS.

    Attributes:
        max_depth: Maximum path depth. (-1 for unlimited)
        rr_depth: Depth at which Russian roulette starts.

    Note:
        See
        [Mitsuba
        doc](https://mitsuba.readthedocs.io/en/stable/src/generated/plugins_integrators.html#volumetric-path-tracer-with-spectral-mis-volpathmis)
        for more details.
    """

    max_depth: int = -1
    rr_depth: int = 5

Sampler

Independent dataclass

Bases: Sampler

Independent sampler.

Note

See Mitsuba doc for more details.

Source code in hakowan/setup/sampler.py
@dataclass(kw_only=True, slots=True)
class Independent(Sampler):
    """Independent sampler.

    Note:
        See
        [Mitsuba
        doc](https://mitsuba.readthedocs.io/en/stable/src/generated/plugins_samplers.html#independent-sampler-independent)
        for more details.
    """

    pass

Sampler dataclass

Sampler dataclass contains sampling-related settings.

Attributes:

Name Type Description
sample_count int

Number of samples per pixel.

seed int

Seed for random number generate.

Source code in hakowan/setup/sampler.py
@dataclass(kw_only=True, slots=True)
class Sampler:
    """Sampler dataclass contains sampling-related settings.

    Attributes:
        sample_count: Number of samples per pixel.
        seed: Seed for random number generate.
    """

    sample_count: int = 256  # Samples per pixel.
    seed: int = 0

Stratified dataclass

Bases: Sampler

Stratified sampler.

Attributes:

Name Type Description
jitter bool

Whether to jitter the samples.

Note

See Mitsuba doc for more details.

Source code in hakowan/setup/sampler.py
@dataclass(kw_only=True, slots=True)
class Stratified(Sampler):
    """Stratified sampler.

    Attributes:
        jitter: Whether to jitter the samples.

    Note:
        See [Mitsuba
        doc](https://mitsuba.readthedocs.io/en/stable/src/generated/plugins_samplers.html#stratified-sampler-stratified)
        for more details.
    """

    jitter: bool = True

Sensor

Orthographic dataclass

Bases: Sensor

Orthographic camera dataclass.

Source code in hakowan/setup/sensor.py
@dataclass(kw_only=True, slots=True)
class Orthographic(Sensor):
    """Orthographic camera dataclass."""

    pass

Perspective dataclass

Bases: Sensor

Perspective camera dataclass.

Attributes:

Name Type Description
fov float

Field of view in degrees.

fov_axis str

Axis to which fov is applied. Can be "x" or "y" or "diagonal" or "smaller" or "larger".

Source code in hakowan/setup/sensor.py
@dataclass(kw_only=True, slots=True)
class Perspective(Sensor):
    """Perspective camera dataclass.

    Attributes:
        fov: Field of view in degrees.
        fov_axis: Axis to which fov is applied. Can be "x" or "y" or "diagonal" or "smaller" or "larger".
    """

    fov: float = 28.8415  # degrees
    fov_axis: str = "smaller"

Sensor dataclass

Sensor dataclass contains camera-related settings.

Attributes:

Name Type Description
location list

Camera location in world space.

target list

Camera look-at location in world space.

up list

Camera up vector in world space.

near_clip float

Near clipping plane distance.

far_clip float

Far clipping plane distance.

Source code in hakowan/setup/sensor.py
@dataclass(kw_only=True, slots=True)
class Sensor:
    """Sensor dataclass contains camera-related settings.

    Attributes:
        location: Camera location in world space.
        target: Camera look-at location in world space.
        up: Camera up vector in world space.
        near_clip: Near clipping plane distance.
        far_clip: Far clipping plane distance.
    """

    location: list = field(default_factory=lambda: [0, 0, 5])
    target: list = field(default_factory=lambda: [0, 0, 0])
    up: list = field(default_factory=lambda: [0, 1, 0])
    near_clip: float = 1e-2
    far_clip: float = 1e4

ThinLens dataclass

Bases: Perspective

Thin lens camera dataclass.

Attributes:

Name Type Description
aperture_radius float

Radius of the aperture in world space.

focus_distance float

Distance to the focal plane in world space.

Source code in hakowan/setup/sensor.py
@dataclass(kw_only=True, slots=True)
class ThinLens(Perspective):
    """Thin lens camera dataclass.

    Attributes:
        aperture_radius: Radius of the aperture in world space.
        focus_distance: Distance to the focal plane in world space.
    """

    aperture_radius: float = 0.1
    focus_distance: float = 0.0