In [0]:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
The world around us is very complex and is made of a wide array of materials, ranging from glass to wood. Each material possesses its own intrinsic properties and interacts differently with light. For instance, some materials are diffuse (e.g. paper or marble) and, for a given lighting condition, look the same from any angle. Other materials (e.g. metal) have an appearance that can vary significantly and exhibit view-dependent effects such as specularities.
Modelling exactly how light interacts with materials is a complex process that involves effects like sub-surface scattering (e.g. skin) and refraction (e.g. water). In this Colab, we focus on the most common effect, which is reflection. Bidirectional reflectance distribution functions (BRDFs) are the method of choice for modelling reflectance: given the direction of the incoming light, a BRDF controls the amount of light that bounces back in the direction from which the surface is observed (any gray vector in the image below).
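For the simple setup used in this notebook (a single point light and no inter-reflections), the radiance leaving a surface point $x$ toward the viewing direction $\omega_o$ can be sketched as

$$L_o(x, \omega_o) = f_r(x, \omega_i, \omega_o)\, E(x, \omega_i), \qquad E(x, \omega_i) = \frac{I}{4\pi d^2}\max(\cos\theta_i, 0),$$

where $f_r$ is the BRDF, $\omega_i$ the direction of the incoming light, $I$ the light intensity, $d$ the distance from the light to $x$, and $\theta_i$ the angle between the surface normal and the direction toward the light. This BRDF-times-irradiance product is what the code below evaluates at every pixel.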
In this Colab, a light will be shone onto three spheres, each with one of the materials described in the image above; the specular material is modelled with the Phong specular model.
Note: this Colab covers an advanced topic and hence focuses on providing a controllable toy example to build a high-level understanding of BRDFs, rather than on step-by-step details. For those interested, these details are nevertheless available in the code.
In [0]:
!pip install tensorflow_graphics
Now that TensorFlow Graphics is installed, let's import everything needed to run the demo contained in this notebook.
In [0]:
import math as m
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow_graphics.rendering.reflectance import lambertian
from tensorflow_graphics.rendering.reflectance import phong
from tensorflow_graphics.rendering.camera import orthographic
from tensorflow_graphics.geometry.representation import grid
from tensorflow_graphics.geometry.representation import ray
from tensorflow_graphics.geometry.representation import vector
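Before building the full image, here is a minimal, optional sketch showing how the two reflectance functions can be evaluated at a single surface point. The direction vectors, albedo and shininess values below are illustrative only; they mirror the conventions used later in this notebook (the incoming direction points from the light toward the surface, the outgoing direction points from the surface toward the camera, and all direction vectors are unit length).
In [0]:
# Minimal single-point sketch (illustrative values only).
point_dtype = np.float64
# Unit direction from the light to the surface point.
incoming = np.array((0.0, 0.0, 1.0), dtype=point_dtype)
# Unit direction from the surface point toward the camera.
outgoing = np.array((0.0, 0.0, -1.0), dtype=point_dtype)
# Unit surface normal facing the camera.
normal = np.array((0.0, 0.0, -1.0), dtype=point_dtype)
# Per-channel albedo in [0, 1].
point_albedo = np.array((0.7, 1.0, 1.0), dtype=point_dtype)
print(lambertian.brdf(incoming, outgoing, normal, point_albedo))
print(phong.brdf(incoming, outgoing, normal,
                 np.array((4.0,), dtype=point_dtype), point_albedo))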
In [0]:
###############
# UI controls #
###############
#@title Controls { vertical-output: false, run: "auto" }
light_x_position = -0.4 #@param { type: "slider", min: -1, max: 1 , step: 0.05 }
albedo_red = 0.7 #@param { type: "slider", min: 0.0, max: 1.0 , step: 0.1 }
albedo_green = 1 #@param { type: "slider", min: 0.0, max: 1.0 , step: 0.1 }
albedo_blue = 1 #@param { type: "slider", min: 0.0, max: 1.0 , step: 0.1 }
light_red = 1 #@param { type: "slider", min: 0.0, max: 1.0 , step: 0.1 }
light_green = 1 #@param { type: "slider", min: 0.0, max: 1.0 , step: 0.1 }
light_blue = 1 #@param { type: "slider", min: 0.0, max: 1.0 , step: 0.1 }
specular_percentage = 0.25 #@param { type: "slider", min: 0, max: 1 , step: 0.01 }
shininess = 4 #@param { type: "slider", min: 0, max: 10, step: 1 }
diffuse_percentage = 1.0 - specular_percentage
dtype = np.float64
albedo = np.array((albedo_red, albedo_green, albedo_blue), dtype=dtype)
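# Helper: casts one orthographic ray per pixel and intersects it with the
# sphere. Rays that miss the sphere yield NaNs, which are mapped to zero so
# those pixels render as black background.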
def compute_intersection_normal_sphere(image_width, image_height, sphere_radius,
                                       sphere_center, dtype):
  pixel_grid_start = np.array((0.5, 0.5), dtype=dtype)
  pixel_grid_end = np.array((image_width - 0.5, image_height - 0.5), dtype=dtype)
  pixel_nb = np.array((image_width, image_height))
  pixels = grid.generate(pixel_grid_start, pixel_grid_end, pixel_nb)
  pixel_ray = tf.math.l2_normalize(orthographic.ray(pixels), axis=-1)
  zero_depth = np.zeros([image_width, image_height, 1])
  pixels_3d = orthographic.unproject(pixels, zero_depth)
  intersections_points, normals = ray.intersection_ray_sphere(
      sphere_center, sphere_radius, pixel_ray, pixels_3d)
  intersections_points = np.nan_to_num(intersections_points)
  normals = np.nan_to_num(normals)
  return intersections_points[0, :, :, :], normals[0, :, :, :]
#####################################
# Setup the image, sphere and light #
#####################################
# Image dimensions
image_width = 400
image_height = 300
# Sphere center and radius
sphere_radius = np.array((100.0,), dtype=dtype)
sphere_center = np.array((image_width / 2.0, image_height / 2.0, 300.0),
                         dtype=dtype)
# Set the light along the image plane
light_position = np.array((image_width / 2.0 + light_x_position * image_width,
                           image_height / 2.0, 0.0),
                          dtype=dtype)
vector_light_to_sphere_center = light_position - sphere_center
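# The intensity is scaled by 4*pi*d^2, with d the distance between the light
# and the sphere center, so that the inverse-square falloff applied further
# below is compensated and the sphere keeps a comparable brightness wherever
# the light slider places the light.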
light_intensity_scale = vector.dot(
    vector_light_to_sphere_center, vector_light_to_sphere_center,
    axis=-1) * 4.0 * m.pi
light_intensity = np.array(
    (light_red, light_green, light_blue)) * light_intensity_scale
################################################################################################
# For each pixel in the image, estimate the corresponding surface point and associated normal. #
################################################################################################
intersection_3d, surface_normal = compute_intersection_normal_sphere(
    image_width, image_height, sphere_radius, sphere_center, dtype)
#######################################
# Reflectance and radiance estimation #
#######################################
incoming_light_direction = tf.math.l2_normalize(
    intersection_3d - light_position, axis=-1)
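# The orthographic camera sits on the image plane at z = 0 and looks down the
# +z axis, so the direction from any surface point back toward the camera is
# the constant -z vector.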
outgoing_ray = np.array((0.0, 0.0, -1.0), dtype=dtype)
albedo = tf.broadcast_to(albedo, tf.shape(surface_normal))
# Lambertian BRDF
brdf_lambertian = diffuse_percentage * lambertian.brdf(
    incoming_light_direction, outgoing_ray, surface_normal, albedo)
# Phong BRDF
brdf_phong = specular_percentage * phong.brdf(
    incoming_light_direction, outgoing_ray, surface_normal,
    np.array((shininess,), dtype=dtype), albedo)
# Composite BRDF
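# The diffuse and specular lobes are blended with weights that sum to 1
# (diffuse_percentage = 1 - specular_percentage), splitting the reflected
# energy between the two terms.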
brdf_composite = brdf_lambertian + brdf_phong
# Irradiance
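# Irradiance from a point light: the intensity attenuated by the inverse-square
# distance to the surface and modulated by the clamped cosine between the
# surface normal and the direction toward the light (Lambert's cosine law).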
cosine_term = vector.dot(surface_normal, -incoming_light_direction)
cosine_term = tf.math.maximum(tf.zeros_like(cosine_term), cosine_term)
vector_light_to_surface = intersection_3d - light_position
light_to_surface_distance_squared = vector.dot(
    vector_light_to_surface, vector_light_to_surface, axis=-1)
irradiance = light_intensity / (4 * m.pi *
                                light_to_surface_distance_squared) * cosine_term
# Rendering equation
zeros = tf.zeros(intersection_3d.shape)
radiance = brdf_composite * irradiance
radiance_lambertian = brdf_lambertian * irradiance
radiance_phong = brdf_phong * irradiance
###############################
# Display the rendered sphere #
###############################
# Saturates radiances at 1 for rendering purposes.
radiance = np.minimum(radiance, 1.0)
radiance_lambertian = np.minimum(radiance_lambertian, 1.0)
radiance_phong = np.minimum(radiance_phong, 1.0)
# Gamma correction
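# A 1/2.2 exponent approximately maps linear radiance to sRGB display values.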
radiance = np.power(radiance, 1.0 / 2.2)
radiance_lambertian = np.power(radiance_lambertian, 1.0 / 2.2)
radiance_phong = np.power(radiance_phong, 1.0 / 2.2)
plt.figure(figsize=(20, 20))
# Diffuse
radiance_lambertian = np.transpose(radiance_lambertian, (1, 0, 2))
ax = plt.subplot(131)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.grid(False)
ax.set_title("Lambertian")
_ = ax.imshow(radiance_lambertian)
# Specular
radiance_phong = np.transpose(radiance_phong, (1, 0, 2))
ax = plt.subplot(132)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.grid(False)
ax.set_title("Specular - Phong")
_ = ax.imshow(radiance_phong)
# Diffuse + specular
radiance = np.transpose(radiance, (1, 0, 2))
ax = plt.subplot(133)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.grid(False)
ax.set_title("Combined lambertian and specular")
_ = ax.imshow(radiance)
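# Note: when running this outside a notebook (e.g. as a plain Python script),
# call plt.show() to display the figure.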