In this section we load the complete Core module, which implements the tensor product de Casteljau algorithm in both a naive and a vectorized version, following Prof. Sestini's implementation. Her code is written in Matlab, while mine is pure Python; the only dependency needed to run it is numpy.
In [20]:
%load tensorProductCore.py
In [21]:
import numpy as np

def de_casteljau(t, control_net, return_diagonals=False):
    """
    Returns a point on the Bezier curve for a given parameter value.

    This function applies the de Casteljau algorithm, using a
    constructive approach (ie, the geometric one).
    Given a control net, it produces a tuple (p, ud, ld) such that:
    - p is the point on the Bezier curve defined by the control net;
    - ud is an array containing points on the upper diagonal, for curve splitting;
    - ld is an array containing points on the lower diagonal, for curve splitting.
    Observe that to get the triple with the diagonals, the argument
    `return_diagonals` should be set to `True`; otherwise only the point
    is returned.
    This function does not mutate the given control net: it works on a copy
    of it (the little extra time required to make the copy is included in
    the big-O complexity).
    The overall complexity is O(n^2), where n is the number of control points.
    """
    n, d = dimensions = np.shape(control_net)
    Q = np.copy(control_net)
    upper_diagonal = np.empty(dimensions)
    lower_diagonal = np.empty(dimensions)
    upper_diagonal[0, :] = Q[0, :]
    lower_diagonal[0, :] = Q[-1, :]
    for k in range(1, n):
        for i in range(n-k):
            Q[i, :] = (1-t)*Q[i, :] + t*Q[i+1, :]
        upper_diagonal[k, :] = Q[0, :]
        lower_diagonal[k, :] = Q[-(k+1), :]
    point = Q[0, :]
    return (point, upper_diagonal, lower_diagonal) if return_diagonals else point
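
# A small usage sketch, added here for illustration (not part of Sestini's
# original code): evaluating a quadratic Bezier curve at t = 1/2 also yields
# the two sub-nets for curve splitting. Call this helper by hand to see the
# values.
def _demo_de_casteljau():
    polygon = np.array([[0, 0], [1, 2], [2, 0]], dtype="float")
    p, ud, ld = de_casteljau(.5, polygon, return_diagonals=True)
    print(p)         # [ 1.  1.], the curve point at t = 1/2
    print(ud)        # left sub-net: [0, 0], [.5, 1], [1, 1]
    print(ld[::-1])  # right sub-net, reversed to restore orientation:
                     # [1, 1], [1.5, 1], [2, 0]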
def de_casteljau_surface(params, control_net):
    """
    Produces a point on the Bezier surface.

    Examples
    ========

    The following example works on a square control net:

    >>> b00 = np.array([0, 0, 0])
    >>> b01 = np.array([2, 0, 0])
    >>> b02 = np.array([4, 0, 0])
    >>> b10 = np.array([0, 2, 0])
    >>> b11 = np.array([2, 2, 0])
    >>> b12 = np.array([4, 2, 2])
    >>> b20 = np.array([0, 4, 0])
    >>> b21 = np.array([2, 4, 4])
    >>> b22 = np.array([4, 4, 4])
    >>> control_net = np.array([[b00, b01, b02],
    ...                         [b10, b11, b12],
    ...                         [b20, b21, b22]], dtype="float")
    >>> u, v = .5, .5
    >>> point = de_casteljau_surface((u, v), control_net)
    >>> point
    array([ 2.,  2.,  1.])
    """
    control_net = np.copy(control_net)
    u, v = params
    rows, cols, d = np.shape(control_net)
    square = min(rows, cols)

    def combine_col(r, c):
        return (1-v)*control_net[r, c, :] + v*control_net[r, c+1, :]

    def combine_row(r, c):
        return (1-u)*control_net[r, c, :] + u*control_net[r+1, c, :]

    # Combine the largest square in the control net, from the top-left corner.
    for k in range(square-1):
        for r in range(square-k):
            for c in range(square-k-1):
                control_net[r, c, :] = combine_col(r, c)
        # After the previous loops we've eliminated the rightmost column.
        for c in range(square-k-1):
            for r in range(square-k-1):
                control_net[r, c, :] = combine_row(r, c)
    point = control_net[0, 0, :]

    if cols > square:
        # Combine the remaining columns, if any, using the de Casteljau
        # algorithm for the curve case.
        def de_casteljau_on_col(c):
            return de_casteljau(u, control_net[:, c, :])
        intermediate_row = np.array([de_casteljau_on_col(c)
                                     for c in range(square-1, cols)])
        point = de_casteljau(v, np.vstack((point, intermediate_row)))
    elif rows > square:
        # Combine the remaining rows, if any, using the de Casteljau
        # algorithm for the curve case.
        def de_casteljau_on_row(r):
            return de_casteljau(v, control_net[r, :, :])
        intermediate_col = np.array([de_casteljau_on_row(r)
                                     for r in range(square-1, rows)])
        point = de_casteljau(u, np.vstack((point, intermediate_col)))
    else:
        pass  # a square control net has already been handled above

    return point
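
# A quick consistency sketch, added here as a hypothetical helper: a Bezier
# patch interpolates the four corners of its control net, so evaluation at
# the parameter corners must return the corner control points.
def _check_corner_interpolation(control_net):
    corners = [((0, 0), control_net[0, 0]), ((0, 1), control_net[0, -1]),
               ((1, 0), control_net[-1, 0]), ((1, 1), control_net[-1, -1])]
    for (u, v), expected in corners:
        assert np.allclose(de_casteljau_surface((u, v), control_net), expected)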
def naive_de_casteljau(control_net, tabs=None, squares_per_dim=20):
    """
    Naive implementation of the de Casteljau algorithm for tensor product patches.
    """
    # Refine the tabulation: each requested square is sampled 10 times per side.
    squares_per_dim *= 10
    if tabs is None:
        tabs = (np.linspace(0, 1, squares_per_dim),
                np.linspace(0, 1, squares_per_dim))
    X, Y = tabs
    X, Y = np.meshgrid(X, Y)
    # Evaluate the surface pointwise, one parameter pair at a time.
    surface = np.array([de_casteljau_surface((u, v), control_net)
                        for u, v in zip(np.ravel(X), np.ravel(Y))])
    return surface, X.shape
def vectorized_de_casteljau(control_net, tabs=None, squares_per_dim=20):
    """
    Vectorized implementation of the de Casteljau algorithm for tensor
    product patches.
    """
    # Refine the tabulation: each requested square is sampled 10 times per side.
    squares_per_dim *= 10
    if tabs is None:
        tabs = (np.linspace(0, 1, squares_per_dim),
                np.linspace(0, 1, squares_per_dim))
    u_tab, v_tab = tabs
    # Sestini's code calls `m` what we call `rows` and
    # calls `n` what we call `cols`.
    rows, cols, d = np.shape(control_net)
    k = min(rows, cols)
    mtab, ntab = len(u_tab), len(v_tab)
    u_tab, v_tab = np.meshgrid(u_tab, v_tab)
    shape = u_tab.shape
    u_tm, v_tm = 1-u_tab, 1-v_tab
    # One copy of the control net for each tabulated parameter pair.
    surface = np.zeros((mtab, ntab, d, rows, cols))

    def initialize(it, jt, i_d):
        surface[it, jt, i_d, :, :] = control_net[:, :, i_d]

    [initialize(it, jt, i_d) for it in range(mtab)
                             for jt in range(ntab)
                             for i_d in range(d)]

    def update_square(i_d, i, j):
        uv_tm_tm = np.multiply(u_tm, v_tm)
        uv_tm_tab = np.multiply(u_tm, v_tab)
        uv_tab_tm = np.multiply(u_tab, v_tm)
        uv_tab_tab = np.multiply(u_tab, v_tab)
        surface[:, :, i_d, i, j] = (
            np.multiply(uv_tm_tm, surface[:, :, i_d, i, j])
            + np.multiply(uv_tm_tab, surface[:, :, i_d, i, j+1])
            + np.multiply(uv_tab_tm, surface[:, :, i_d, i+1, j])
            + np.multiply(uv_tab_tab, surface[:, :, i_d, i+1, j+1]))

    # Combine in square blocks as much as `k` allows.
    for s in range(1, k):
        [update_square(i_d, i, j) for i_d in range(d)
                                  for i in range(rows-s)
                                  for j in range(cols-s)]

    def update_along_u(i_d, i):
        surface[:, :, i_d, i, 0] = (np.multiply(u_tm, surface[:, :, i_d, i, 0]) +
                                    np.multiply(u_tab, surface[:, :, i_d, i+1, 0]))

    # Complete the combination along the `u` direction, if any rows remain.
    for r in range(rows-k):
        [update_along_u(i_d, i) for i_d in range(d)
                                for i in range(rows-k-r)]

    def update_along_v(i_d, j):
        surface[:, :, i_d, 0, j] = (np.multiply(v_tm, surface[:, :, i_d, 0, j]) +
                                    np.multiply(v_tab, surface[:, :, i_d, 0, j+1]))

    # Complete the combination along the `v` direction, if any columns remain.
    for c in range(cols-k):
        [update_along_v(i_d, j) for i_d in range(d)
                                for j in range(cols-k-c)]

    # Strip the tabulation structure away, keeping only the surface points.
    surface = np.array([surface[i, j, :, 0, 0] for i in range(mtab)
                                               for j in range(ntab)])
    return surface, shape
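The core step of the vectorized version is the bilinear de Casteljau recurrence, applied simultaneously at every tabulated parameter pair $(u, v)$:

$$b_{i,j}^{(s)} = (1-u)(1-v)\,b_{i,j}^{(s-1)} + (1-u)\,v\,b_{i,j+1}^{(s-1)} + u\,(1-v)\,b_{i+1,j}^{(s-1)} + u\,v\,b_{i+1,j+1}^{(s-1)}$$

As a quick sanity check (a sketch added here, not part of Sestini's code), on a square control net the two implementations should tabulate the same points, up to floating point noise:
In [ ]:
import numpy as np
import tensorProductCore as tpc
# The 3x3 net from the docstring example above.
net = np.array([[[0, 0, 0], [2, 0, 0], [4, 0, 0]],
                [[0, 2, 0], [2, 2, 0], [4, 2, 2]],
                [[0, 4, 0], [2, 4, 4], [4, 4, 4]]], dtype="float")
pts_naive, shape_naive = tpc.naive_de_casteljau(net, squares_per_dim=2)
pts_vec, shape_vec = tpc.vectorized_de_casteljau(net, squares_per_dim=2)
assert shape_naive == shape_vec
assert np.allclose(pts_naive, pts_vec)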
In this exercise we draw a tensor product Bezier patch over a square control net, taken from Farin, page 249.
In [1]:
import numpy as np
import tensorProductCore as tpc
import tensorProductDrawer as tpd
import timeit
b00 = np.array([0, 0, 0])
b01 = np.array([2, 0, 0])
b02 = np.array([4, 0, 0])
b10 = np.array([0, 2, 0])
b11 = np.array([2, 2, 0])
b12 = np.array([4, 2, 2])
b20 = np.array([0, 4, 0])
b21 = np.array([2, 4, 4])
b22 = np.array([4, 4, 4])
control_net = np.array([[b00, b01, b02],
[b10, b11, b12],
[b20, b21, b22]],
dtype="float")
First we draw with the vectorized implementation:
In [2]:
surface = tpc.vectorized_de_casteljau(control_net, squares_per_dim=50)
tpd.draw(*surface)
None
Then with the naive one:
In [3]:
surface = tpc.naive_de_casteljau(control_net, squares_per_dim=50)
tpd.draw(*surface)
None
Here the two surfaces are practically identical, both in shape and in level curves; they differ only in running time, as we'll see in the next section. A difference between the two implementations shows up in the last section, about rectangular control nets.
We compare the running times of both implementations on the same control net, requesting the same number of squares per side. The first line of the following output refers to the vectorized implementation, the second to the naive one. We see a factor of about 20 between the two running times.
In [5]:
%timeit tpc.vectorized_de_casteljau(control_net, squares_per_dim=50)
%timeit tpc.naive_de_casteljau(control_net, squares_per_dim=50)
In [13]:
import numpy as np
import tensorProductCore as tpc
import tensorProductDrawer as tpd
b00 = np.array([0, 0, 0])
b01 = np.array([2, 0, 0])
b02 = np.array([4, 0, 0])
b03 = np.array([6, 0, 0])
b10 = np.array([0, 2, 0])
b11 = np.array([2, 2, 4])
b12 = np.array([4, 2, 4])
b13 = np.array([6, 2, 0])
b20 = np.array([0, 4, 0])
b21 = np.array([2, 4, 4])
b22 = np.array([4, 4, 4])
b23 = np.array([6, 4, 0])
b30 = np.array([0, 6, 0])
b31 = np.array([2, 6, 0])
b32 = np.array([4, 6, 0])
b33 = np.array([6, 6, 0])
control_net = np.array([[b00,b01,b02, b03],
[b10,b11,b12, b13],
[b20,b21,b22, b23],
[b30,b31,b32, b33]],
dtype="float")
surface = tpc.naive_de_casteljau(control_net)
%timeit tpc.naive_de_casteljau(control_net)
tpd.draw(*surface)
None
In [17]:
import numpy as np
import tensorProductCore as tpc
import tensorProductDrawer as tpd
b00 = np.array([0, 0, 0])
b01 = np.array([2, 0, 0])
b02 = np.array([4, 0, 0])
b10 = np.array([0, 2, 0])
b11 = np.array([2, 2, 0])
b12 = np.array([4, 2, 2])
b20 = np.array([0, 4, 0])
b21 = np.array([2, 4, 4])
b22 = np.array([4, 4, 4])
control_net = np.array([[b00,b01],
[b10,b11],
[b20,b21]],
dtype="float")
First we draw using the vectorized implementation:
In [18]:
surface = tpc.vectorized_de_casteljau(control_net, squares_per_dim=50)
tpd.draw(*surface)
None
Then we use the naive one:
In [19]:
surface = tpc.naive_de_casteljau(control_net, squares_per_dim=50)
tpd.draw(*surface)
None
Observe that the surface drawn with the naive implementation is "smoother" in some sense: in particular, have a look at the level curves relative to the $x$ and $y$ axes (ie, the second and third plots). Possibly the two implementations differ in how they handle the control points that lie outside the largest square subnet.
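To quantify this, one could tabulate the rectangular net with both implementations and measure the largest pointwise gap. This is only a diagnostic sketch, reusing the `control_net` defined above; a value far from zero would support the suspicion that the discrepancy lies in the handling of the non-square part:
In [ ]:
# Diagnostic sketch (not a proof): compare both tabulations pointwise on the
# rectangular net; a small grid keeps the naive evaluation fast.
pts_vec, _ = tpc.vectorized_de_casteljau(control_net, squares_per_dim=5)
pts_naive, _ = tpc.naive_de_casteljau(control_net, squares_per_dim=5)
print(np.abs(pts_vec - pts_naive).max())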