diff --git a/CHANGELOG b/CHANGELOG index e86a70efb0e4..b93a2fcffc33 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,3 +1,6 @@ +2013-02-25 Added classes CubicTriInterpolator, UniformTriRefiner, TriAnalyzer + to matplotlib.tri module. - GBy + 2013-01-23 Add 'savefig.directory' to rcParams to remember and fill in the last directory saved to for figure save dialogs - Martin Spacek diff --git a/doc/api/tri_api.rst b/doc/api/tri_api.rst index 0a91c2549d8d..8ede2a9beb28 100644 --- a/doc/api/tri_api.rst +++ b/doc/api/tri_api.rst @@ -4,18 +4,32 @@ triangular grids :mod:`matplotlib.tri` ===================== +.. automodule:: matplotlib.tri .. autoclass:: matplotlib.tri.Triangulation - :members: + :members: .. autoclass:: matplotlib.tri.TriFinder - :members: .. autoclass:: matplotlib.tri.TrapezoidMapTriFinder - :members: __call__ + :members: __call__ + :show-inheritance: .. autoclass:: matplotlib.tri.TriInterpolator - :members: - + .. autoclass:: matplotlib.tri.LinearTriInterpolator - :members: __call__ + :members: __call__, gradient + :show-inheritance: + +.. autoclass:: matplotlib.tri.CubicTriInterpolator + :members: __call__, gradient + :show-inheritance: + +.. autoclass:: matplotlib.tri.TriRefiner + +.. autoclass:: matplotlib.tri.UniformTriRefiner + :show-inheritance: + :members: + +.. autoclass:: matplotlib.tri.TriAnalyzer + :members: diff --git a/doc/users/whats_new.rst b/doc/users/whats_new.rst index a6e86582903b..52573d38f554 100644 --- a/doc/users/whats_new.rst +++ b/doc/users/whats_new.rst @@ -75,10 +75,17 @@ conversion (`mpl.rc('svg', fonttype='none')`). Triangular grid interpolation ----------------------------- -Ian Thomas added classes to perform interpolation within triangular grids -(:class:`~matplotlib.tri.LinearTriInterpolator`) and a utility class to find +Geoffroy Billotey and Ian Thomas added classes to perform interpolation within +triangular grids: (:class:`~matplotlib.tri.LinearTriInterpolator` and +:class:`~matplotlib.tri.CubicTriInterpolator`) and a utility class to find the triangles in which points lie ( :class:`~matplotlib.tri.TrapezoidMapTriFinder`). +A helper class to perform mesh refinement and smooth contouring was also added +(:class:`~matplotlib.tri.UniformTriRefiner`). +Finally, a class implementing some basic tools for triangular mesh improvement +was added (:class:`~matplotlib.tri.TriAnalyzer`). + +.. plot:: mpl_examples/pylab_examples/tricontour_smooth_user.py Left and right side axes titles ------------------------------- diff --git a/examples/pylab_examples/tricontour_smooth_delaunay.py b/examples/pylab_examples/tricontour_smooth_delaunay.py new file mode 100644 index 000000000000..27b2695b2b05 --- /dev/null +++ b/examples/pylab_examples/tricontour_smooth_delaunay.py @@ -0,0 +1,133 @@ +""" +Demonstrates high-resolution tricontouring of a random set of points ; +a matplotlib.tri.TriAnalyzer is used to improve the plot quality. + +The initial data points and triangular grid for this demo are: + - a set of random points is instantiated, inside [-1, 1] x [-1, 1] square + - A Delaunay triangulation of these points is then computed, of which a + random subset of triangles is masked out by the user (based on + *init_mask_frac* parameter). This simulates invalidated data. + +The proposed generic procedure to obtain a high resolution contouring of such +a data set is the following: + 1) Compute an extended mask with a matplotlib.tri.TriAnalyzer, which will + exclude badly shaped (flat) triangles from the border of the + triangulation. 
Apply the mask to the triangulation (using set_mask).
+ 2) Refine and interpolate the data using a
+    matplotlib.tri.UniformTriRefiner.
+ 3) Plot the refined data with tricontour.
+
+"""
+from matplotlib.tri import Triangulation, TriAnalyzer, UniformTriRefiner
+import matplotlib.pyplot as plt
+import matplotlib.cm as cm
+import numpy as np
+
+
+#-----------------------------------------------------------------------------
+# Analytical test function
+#-----------------------------------------------------------------------------
+def experiment_res(x, y):
+    """ An analytic function representing experiment results """
+    x = 2.*x
+    r1 = np.sqrt((0.5-x)**2 + (0.5-y)**2)
+    theta1 = np.arctan2(0.5-x, 0.5-y)
+    r2 = np.sqrt((-x-0.2)**2 + (-y-0.2)**2)
+    theta2 = np.arctan2(-x-0.2, -y-0.2)
+    z = (4*(np.exp((r1/10)**2)-1)*30. * np.cos(3*theta1) +
+         (np.exp((r2/10)**2)-1)*30. * np.cos(5*theta2) +
+         2*(x**2 + y**2))
+    return (np.max(z)-z)/(np.max(z)-np.min(z))
+
+#-----------------------------------------------------------------------------
+# Generating the initial data test points and triangulation for the demo
+#-----------------------------------------------------------------------------
+# User parameters for data test points
+n_test = 200  # Number of test data points, tested from 3 to 5000 for subdiv=3
+
+subdiv = 3  # Number of recursive subdivisions of the initial mesh for smooth
+            # plots. Values >3 might result in a very high number of triangles
+            # for the refined mesh: number of new triangles = (4**subdiv)*ntri
+
+init_mask_frac = 0.0    # Float > 0. adjusting the proportion of
+                        # (invalid) initial triangles which will be masked
+                        # out. Enter 0 for no mask.
+
+min_circle_ratio = .01  # Minimum circle ratio - border triangles with circle
+                        # ratio below this will be masked if they touch a
+                        # border. Suggested value: 0.01; use -1 to keep
+                        # all triangles.
+
+# Random points
+random_gen = np.random.mtrand.RandomState(seed=127260)
+x_test = random_gen.uniform(-1., 1., size=n_test)
+y_test = random_gen.uniform(-1., 1., size=n_test)
+z_test = experiment_res(x_test, y_test)
+
+# meshing with Delaunay triangulation
+tri = Triangulation(x_test, y_test)
+ntri = tri.triangles.shape[0]
+
+# Some invalid data are masked out
+mask_init = np.zeros(ntri, dtype=np.bool)
+masked_tri = random_gen.randint(0, ntri, int(ntri*init_mask_frac))
+mask_init[masked_tri] = True
+tri.set_mask(mask_init)
+
+
+#-----------------------------------------------------------------------------
+# Improving the triangulation before high-res plots: removing flat triangles
+#-----------------------------------------------------------------------------
+# masking badly shaped triangles at the border of the triangular mesh.
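+# (The "circle ratio" of a triangle is the ratio of the radius of its
+# inscribed circle to the radius of its circumscribed circle; it equals
+# 0.5 for an equilateral triangle and tends to 0 for a flat one, which
+# makes it a convenient measure of triangle quality.)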
+mask = TriAnalyzer(tri).get_flat_tri_mask(min_circle_ratio)
+tri.set_mask(mask)
+
+# refining the data
+refiner = UniformTriRefiner(tri)
+tri_refi, z_test_refi = refiner.refine_field(z_test, subdiv=subdiv)
+
+# analytical 'results' for comparison
+z_expected = experiment_res(tri_refi.x, tri_refi.y)
+
+# for the demo: loading the 'flat' triangles for plot
+flat_tri = Triangulation(x_test, y_test)
+flat_tri.set_mask(~mask)
+
+
+#-----------------------------------------------------------------------------
+# Now the plots
+#-----------------------------------------------------------------------------
+# User options for plots
+plot_tri = True          # plot of the base triangulation
+plot_masked_tri = True   # plot of the excessively flat excluded triangles
+plot_refi_tri = False    # plot of the refined triangulation
+plot_expected = False    # plot of the analytical function values for comparison
+
+
+# Graphical options for tricontouring
+levels = np.arange(0., 1., 0.025)
+cmap = cm.get_cmap(name='Blues', lut=None)
+
+plt.figure()
+plt.gca().set_aspect('equal')
+plt.title("Filtering a Delaunay mesh\n" +
+          "(application to high-resolution tricontouring)")
+
+# 1) plot of the refined (computed) data contours:
+plt.tricontour(tri_refi, z_test_refi, levels=levels, cmap=cmap,
+               linewidths=[2.0, 0.5, 1.0, 0.5])
+# 2) plot of the expected (analytical) data contours (dashed):
+if plot_expected:
+    plt.tricontour(tri_refi, z_expected, levels=levels, cmap=cmap,
+                   linestyles='--')
+# 3) plot of the fine mesh on which interpolation was done:
+if plot_refi_tri:
+    plt.triplot(tri_refi, color='0.97')
+# 4) plot of the initial 'coarse' mesh:
+if plot_tri:
+    plt.triplot(tri, color='0.7')
+# 5) plot of the invalidated triangles from the naive Delaunay triangulation:
+if plot_masked_tri:
+    plt.triplot(flat_tri, color='red')
+
+plt.show()
diff --git a/examples/pylab_examples/tricontour_smooth_user.py b/examples/pylab_examples/tricontour_smooth_user.py
new file mode 100644
index 000000000000..33551956aa2b
--- /dev/null
+++ b/examples/pylab_examples/tricontour_smooth_user.py
@@ -0,0 +1,76 @@
+"""
+Demonstrates high-resolution tricontouring on user-defined triangular grids
+with matplotlib.tri.UniformTriRefiner
+"""
+from matplotlib.tri import Triangulation, UniformTriRefiner
+import matplotlib.pyplot as plt
+import matplotlib.cm as cm
+import numpy as np
+import math
+
+
+#-----------------------------------------------------------------------------
+# Analytical test function
+#-----------------------------------------------------------------------------
+def function_z(x, y):
+    """ A function of 2 variables """
+    r1 = np.sqrt((0.5-x)**2 + (0.5-y)**2)
+    theta1 = np.arctan2(0.5-x, 0.5-y)
+    r2 = np.sqrt((-x-0.2)**2 + (-y-0.2)**2)
+    theta2 = np.arctan2(-x-0.2, -y-0.2)
+    z = -(2*(np.exp((r1/10)**2)-1)*30. * np.cos(7.*theta1) +
+          (np.exp((r2/10)**2)-1)*30. * np.cos(11.*theta2) +
+          0.7*(x**2 + y**2))
+    return (np.max(z)-z)/(np.max(z)-np.min(z))
+
+#-----------------------------------------------------------------------------
+# Creating a Triangulation
+#-----------------------------------------------------------------------------
+# First create the x and y coordinates of the points.
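+# (The grid is polar: n_angles points are placed on each of n_radii
+# concentric circles, and every other circle is rotated by half an angular
+# step so that the resulting Delaunay triangles are well shaped.)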
+n_angles = 20 +n_radii = 10 +min_radius = 0.15 +radii = np.linspace(min_radius, 0.95, n_radii) + +angles = np.linspace(0, 2*math.pi, n_angles, endpoint=False) +angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1) +angles[:, 1::2] += math.pi/n_angles + +x = (radii*np.cos(angles)).flatten() +y = (radii*np.sin(angles)).flatten() +z = function_z(x, y) + +# Now create the Triangulation. +# (Creating a Triangulation without specifying the triangles results in the +# Delaunay triangulation of the points.) +triang = Triangulation(x, y) + +# Mask off unwanted triangles. +xmid = x[triang.triangles].mean(axis=1) +ymid = y[triang.triangles].mean(axis=1) +mask = np.where(xmid*xmid + ymid*ymid < min_radius*min_radius, 1, 0) +triang.set_mask(mask) + +#----------------------------------------------------------------------------- +# Refine data +#----------------------------------------------------------------------------- +refiner = UniformTriRefiner(triang) +tri_refi, z_test_refi = refiner.refine_field(z, subdiv=3) + +#----------------------------------------------------------------------------- +# Plot the triangulation and the high-res iso-contours +#----------------------------------------------------------------------------- +plt.figure() +plt.gca().set_aspect('equal') +plt.triplot(triang, lw=0.5, color='white') + +levels = np.arange(0., 1., 0.025) +cmap = cm.get_cmap(name='terrain', lut=None) +plt.tricontourf(tri_refi, z_test_refi, levels=levels, cmap=cmap) +plt.tricontour(tri_refi, z_test_refi, levels=levels, + colors=['0.25', '0.5', '0.5', '0.5', '0.5'], + linewidths=[1.0, 0.5, 0.5, 0.5, 0.5]) + +plt.title("High-resolution tricontouring") + +plt.show() diff --git a/examples/pylab_examples/trigradient_demo.py b/examples/pylab_examples/trigradient_demo.py new file mode 100644 index 000000000000..c8967b5397be --- /dev/null +++ b/examples/pylab_examples/trigradient_demo.py @@ -0,0 +1,81 @@ +""" +Demonstrates computation of gradient with matplotlib.tri.CubicTriInterpolator. +""" +from matplotlib.tri import Triangulation, UniformTriRefiner,\ + CubicTriInterpolator +import matplotlib.pyplot as plt +import matplotlib.cm as cm +import numpy as np +import math + + +#----------------------------------------------------------------------------- +# Electrical potential of a dipole +#----------------------------------------------------------------------------- +def dipole_potential(x, y): + """ The electric dipole potential V """ + r_sq = x**2 + y**2 + theta = np.arctan2(y, x) + z = np.cos(theta)/r_sq + return (np.max(z)-z) / (np.max(z)-np.min(z)) + + +#----------------------------------------------------------------------------- +# Creating a Triangulation +#----------------------------------------------------------------------------- +# First create the x and y coordinates of the points. +n_angles = 30 +n_radii = 10 +min_radius = 0.2 +radii = np.linspace(min_radius, 0.95, n_radii) + +angles = np.linspace(0, 2*math.pi, n_angles, endpoint=False) +angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1) +angles[:, 1::2] += math.pi/n_angles + +x = (radii*np.cos(angles)).flatten() +y = (radii*np.sin(angles)).flatten() +V = dipole_potential(x, y) + +# Create the Triangulation; no triangles specified so Delaunay triangulation +# created. +triang = Triangulation(x, y) + +# Mask off unwanted triangles. 
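+# (A Delaunay triangulation covers the convex hull of the point set, so
+# the central hole must be re-opened by masking every triangle whose
+# barycenter lies inside the min_radius circle.)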
+xmid = x[triang.triangles].mean(axis=1) +ymid = y[triang.triangles].mean(axis=1) +mask = np.where(xmid*xmid + ymid*ymid < min_radius*min_radius, 1, 0) +triang.set_mask(mask) + +#----------------------------------------------------------------------------- +# Refine data - interpolates the electrical potential V +#----------------------------------------------------------------------------- +refiner = UniformTriRefiner(triang) +tri_refi, z_test_refi = refiner.refine_field(V, subdiv=3) + +#----------------------------------------------------------------------------- +# Computes the electrical field (Ex, Ey) as gradient of electrical potential +#----------------------------------------------------------------------------- +tci = CubicTriInterpolator(triang, -V) +# Gradient requested here at the mesh nodes but could be anywhere else: +(Ex, Ey) = tci.gradient(triang.x, triang.y) +E_norm = np.sqrt(Ex**2 + Ey**2) + +#----------------------------------------------------------------------------- +# Plot the triangulation, the potential iso-contours and the vector field +#----------------------------------------------------------------------------- +plt.figure() +plt.gca().set_aspect('equal') +plt.triplot(triang, color='0.8') + +levels = np.arange(0., 1., 0.01) +cmap = cm.get_cmap(name='hot', lut=None) +plt.tricontour(tri_refi, z_test_refi, levels=levels, cmap=cmap, + linewidths=[2.0, 1.0, 1.0, 1.0]) +# Plots direction of the electrical vector field +plt.quiver(triang.x, triang.y, Ex/E_norm, Ey/E_norm, + units='xy', scale=10., zorder=3, color='blue', + width=0.007, headwidth=3., headlength=4.) + +plt.title('Gradient plot: an electrical dipole') +plt.show() diff --git a/examples/pylab_examples/triinterp_demo.py b/examples/pylab_examples/triinterp_demo.py index 7aefd844c23a..5be202b8cf99 100644 --- a/examples/pylab_examples/triinterp_demo.py +++ b/examples/pylab_examples/triinterp_demo.py @@ -5,10 +5,9 @@ import matplotlib.tri as mtri import numpy as np - # Create triangulation. x = np.asarray([0, 1, 2, 3, 0.5, 1.5, 2.5, 1, 2, 1.5]) -y = np.asarray([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]) +y = np.asarray([0, 0, 0, 0, 1.0, 1.0, 1.0, 2, 2, 3.0]) triangles = [[0, 1, 4], [1, 2, 5], [2, 3, 6], [1, 5, 4], [2, 6, 5], [4, 5, 7], [5, 6, 8], [5, 8, 7], [7, 8, 9]] triang = mtri.Triangulation(x, y, triangles) @@ -20,16 +19,34 @@ zi = interp(xi, yi) # Plot the triangulation. -plt.subplot(121) +plt.subplot(221) plt.tricontourf(triang, z) plt.triplot(triang, 'ko-') plt.title('Triangular grid') # Plot interpolation to quad grid. 
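+# (The layout changes from 1x2 to 2x2 so that linear interpolation can be
+# compared side by side with the two cubic variants added below.)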
-plt.subplot(122) +plt.subplot(222) plt.contourf(xi, yi, zi) plt.plot(xi, yi, 'k-', alpha=0.5) plt.plot(xi.T, yi.T, 'k-', alpha=0.5) plt.title('Linear interpolation') +interp2 = mtri.CubicTriInterpolator(triang, z, kind='geom') +zi2 = interp2(xi, yi) + +plt.subplot(223) +plt.contourf(xi, yi, zi2) +plt.plot(xi, yi, 'k-', alpha=0.5) +plt.plot(xi.T, yi.T, 'k-', alpha=0.5) +plt.title('Cubic interpolation (geom)') + +interp3 = mtri.CubicTriInterpolator(triang, z, kind='min_E') +zi3 = interp3(xi, yi) + +plt.subplot(224) +plt.contourf(xi, yi, zi3) +plt.plot(xi, yi, 'k-', alpha=0.5) +plt.plot(xi.T, yi.T, 'k-', alpha=0.5) +plt.title('Cubic interpolation (min_E)') + plt.show() diff --git a/lib/matplotlib/tests/baseline_images/test_triangulation/tri_smooth_contouring.png b/lib/matplotlib/tests/baseline_images/test_triangulation/tri_smooth_contouring.png new file mode 100644 index 000000000000..bbf702822e84 Binary files /dev/null and b/lib/matplotlib/tests/baseline_images/test_triangulation/tri_smooth_contouring.png differ diff --git a/lib/matplotlib/tests/baseline_images/test_triangulation/tri_smooth_gradient.png b/lib/matplotlib/tests/baseline_images/test_triangulation/tri_smooth_gradient.png new file mode 100644 index 000000000000..a201efee0854 Binary files /dev/null and b/lib/matplotlib/tests/baseline_images/test_triangulation/tri_smooth_gradient.png differ diff --git a/lib/matplotlib/tests/test_triangulation.py b/lib/matplotlib/tests/test_triangulation.py index 889b02bfd948..3e4eeb576c1e 100644 --- a/lib/matplotlib/tests/test_triangulation.py +++ b/lib/matplotlib/tests/test_triangulation.py @@ -3,21 +3,24 @@ import matplotlib.tri as mtri import matplotlib.delaunay as mdel from nose.tools import assert_equal -from numpy.testing import assert_array_equal, assert_array_almost_equal +from numpy.testing import assert_array_equal, assert_array_almost_equal,\ + assert_array_less from matplotlib.testing.decorators import image_comparison +import matplotlib.cm as cm + def test_delaunay(): # No duplicate points. - x = [0,1,1,0] - y = [0,0,1,1] + x = [0, 1, 1, 0] + y = [0, 0, 1, 1] npoints = 4 ntriangles = 2 nedges = 5 # Without duplicate points, mpl calls delaunay triangulation and # does not modify it. - mpl_triang = mtri.Triangulation(x,y) - del_triang = mdel.Triangulation(x,y) + mpl_triang = mtri.Triangulation(x, y) + del_triang = mdel.Triangulation(x, y) # Points - floating point. assert_array_almost_equal(mpl_triang.x, x) @@ -37,13 +40,14 @@ def test_delaunay(): assert_equal(np.max(mpl_triang.edges), npoints-1) assert_array_equal(mpl_triang.edges, del_triang.edge_db) + def test_delaunay_duplicate_points(): # Issue 838. import warnings # Index 2 is the same as index 0. - x = [0,1,0,1,0] - y = [0,0,0,1,1] + x = [0, 1, 0, 1, 0] + y = [0, 0, 0, 1, 1] duplicate_index = 2 npoints = 4 # Number of non-duplicate points. nduplicates = 1 @@ -53,22 +57,26 @@ def test_delaunay_duplicate_points(): # With duplicate points, mpl calls delaunay triangulation but # modified returned arrays. warnings.simplefilter("ignore") # Ignore DuplicatePointWarning. - mpl_triang = mtri.Triangulation(x,y) - del_triang = mdel.Triangulation(x,y) + mpl_triang = mtri.Triangulation(x, y) + del_triang = mdel.Triangulation(x, y) warnings.resetwarnings() # Points - floating point. 
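+    # (mpl keeps the duplicate point in its x/y arrays, whereas the
+    # delaunay module drops it; hence the off-by-one length checks below.)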
assert_equal(len(mpl_triang.x), npoints + nduplicates) assert_equal(len(del_triang.x), npoints) assert_array_almost_equal(mpl_triang.x, x) - assert_array_almost_equal(del_triang.x[:duplicate_index], x[:duplicate_index]) - assert_array_almost_equal(del_triang.x[duplicate_index:], x[duplicate_index+1:]) + assert_array_almost_equal(del_triang.x[:duplicate_index], + x[:duplicate_index]) + assert_array_almost_equal(del_triang.x[duplicate_index:], + x[duplicate_index+1:]) assert_equal(len(mpl_triang.y), npoints + nduplicates) assert_equal(len(del_triang.y), npoints) assert_array_almost_equal(mpl_triang.y, y) - assert_array_almost_equal(del_triang.y[:duplicate_index], y[:duplicate_index]) - assert_array_almost_equal(del_triang.y[duplicate_index:], y[duplicate_index+1:]) + assert_array_almost_equal(del_triang.y[:duplicate_index], + y[:duplicate_index]) + assert_array_almost_equal(del_triang.y[duplicate_index:], + y[duplicate_index+1:]) # Triangles - integers. assert_equal(len(mpl_triang.triangles), ntriangles) @@ -96,6 +104,7 @@ def test_delaunay_duplicate_points(): mpl_triang.edges) assert_array_equal(del_triang.edge_db, converted_indices) + @image_comparison(baseline_images=['tripcolor1']) def test_tripcolor(): x = np.asarray([0, 0.5, 1, 0, 0.5, 1, 0, 0.5, 1, 0.75]) @@ -104,7 +113,7 @@ def test_tripcolor(): [0, 1, 3], [1, 4, 3], [1, 2, 4], [2, 5, 4], [3, 4, 6], [4, 7, 6], - [4, 5, 9], [7, 4, 9], [8, 7, 9], [5, 8, 9] ]) + [4, 5, 9], [7, 4, 9], [8, 7, 9], [5, 8, 9]]) # Triangulation with same number of points and triangles. triang = mtri.Triangulation(x, y, triangles) @@ -123,15 +132,17 @@ def test_tripcolor(): plt.tripcolor(triang, facecolors=Cfaces, edgecolors='k') plt.title('facecolors') + def test_no_modify(): triangles = np.array([[3, 2, 0], [3, 1, 0]], dtype=np.int32) points = np.array([(0, 0), (0, 1.1), (1, 0), (1, 1)]) old_triangles = triangles.copy() - tri = mtri.Triangulation(points[:,0], points[:,1], triangles) + tri = mtri.Triangulation(points[:, 0], points[:, 1], triangles) edges = tri.edges assert_array_equal(old_triangles, triangles) + def test_trifinder(): # Test points within triangles of masked triangulation. x, y = np.meshgrid(np.arange(4), np.arange(4)) @@ -191,7 +202,7 @@ def test_trifinder(): # Test triangles with vertical colinear points. These are not valid # triangulations, but we try to deal with the simplest violations. delta = 0.0 # If +ve, triangulation is OK, if -ve triangulation invalid, - # if zero have colinear points but should pass tests anyway. + # if zero have colinear points but should pass tests anyway. x = [-1, -delta, 0, 0, 0, 0, 1] y = [1.5, 1.5, 0, 1, 2, 3, 1.5] triangles = [[0, 1, 2], [0, 1, 5], [1, 2, 3], [1, 3, 4], [1, 4, 5], @@ -215,7 +226,7 @@ def test_trifinder(): trifinder = triang.get_trifinder() xs = [-0.2, 0.2, 0.8, 1.2] - ys = [0.5, 0.5, 0.5, 0.5] + ys = [ 0.5, 0.5, 0.5, 0.5] tris = trifinder(xs, ys) assert_array_equal(tris, [-1, 0, 1, -1]) @@ -224,6 +235,7 @@ def test_trifinder(): tris = trifinder(xs, ys) assert_array_equal(tris, [-1, -1, 1, -1]) + def test_triinterp(): # Test points within triangles of masked triangulation. 
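+    # For orientation, the interpolator API exercised below (a minimal,
+    # hypothetical usage sketch):
+    #     triang = mtri.Triangulation(x, y)              # Delaunay by default
+    #     interp = mtri.CubicTriInterpolator(triang, z)  # kind='min_E'
+    #     zs = interp(xs, ys)                    # masked outside the grid
+    #     dzdx, dzdy = interp.gradient(xs, ys)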
 x, y = np.meshgrid(np.arange(4), np.arange(4))
@@ -238,20 +250,635 @@ def test_triinterp():
     mask[8:10] = 1
     triang = mtri.Triangulation(x, y, triangles, mask)
     linear_interp = mtri.LinearTriInterpolator(triang, z)
+    cubic_min_E = mtri.CubicTriInterpolator(triang, z)
+    cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom')
 
     xs = np.linspace(0.25, 2.75, 6)
     ys = [0.25, 0.75, 2.25, 2.75]
-    xs, ys = np.meshgrid(xs, ys)
-    xs = xs.ravel()
-    ys = ys.ravel()
-    zs = linear_interp(xs, ys)
-    assert_array_almost_equal(zs, (1.23*xs - 4.79*ys))
+    xs, ys = np.meshgrid(xs, ys)  # Testing arrays with array.ndim = 2
+    for interp in (linear_interp, cubic_min_E, cubic_geom):
+        zs = interp(xs, ys)
+        assert_array_almost_equal(zs, (1.23*xs - 4.79*ys))
 
     # Test points outside triangulation.
     xs = [-0.25, 1.25, 1.75, 3.25]
     ys = xs
     xs, ys = np.meshgrid(xs, ys)
+    for interp in (linear_interp, cubic_min_E, cubic_geom):
+        zs = interp(xs, ys)
+        assert_array_equal(zs.mask, [[True]*4]*4)
+
+    # Test mixed configuration (outside / inside).
+    xs = np.linspace(0.25, 1.75, 6)
+    ys = [0.25, 0.75, 1.25, 1.75]
+    xs, ys = np.meshgrid(xs, ys)
+    for interp in (linear_interp, cubic_min_E, cubic_geom):
+        zs = interp(xs, ys)
+        assert_array_almost_equal(zs, (1.23*xs - 4.79*ys))
+        mask = (xs >= 1) * (xs <= 2) * (ys >= 1) * (ys <= 2)
+        assert_array_equal(zs.mask, mask)
+
+    # 2nd order patch test: on a grid with an 'arbitrary shaped' triangle,
+    # patch test shall be exact for quadratic functions and cubic
+    # interpolator if *kind* = 'user'
+    (a, b, c) = (1.23, -4.79, 0.6)
+
+    def quad(x, y):
+        return a*(x-0.5)**2 + b*(y-0.5)**2 + c*x*y
+
+    def gradient_quad(x, y):
+        return (2*a*(x-0.5) + c*y, 2*b*(y-0.5) + c*x)
+
+    x = np.array([0.2, 0.33367, 0.669, 0., 1., 1., 0.])
+    y = np.array([0.3, 0.80755, 0.4335, 0., 0., 1., 1.])
+    triangles = np.array([[0, 1, 2], [3, 0, 4], [4, 0, 2], [4, 2, 5],
+                          [1, 5, 2], [6, 5, 1], [6, 1, 0], [6, 0, 3]])
+    triang = mtri.Triangulation(x, y, triangles)
+    z = quad(x, y)
+    dz = gradient_quad(x, y)
+    # test points for 2nd order patch test
+    xs = np.linspace(0., 1., 5)
+    ys = np.linspace(0., 1., 5)
+    xs, ys = np.meshgrid(xs, ys)
+    cubic_user = mtri.CubicTriInterpolator(triang, z, kind='user', dz=dz)
+    interp_zs = cubic_user(xs, ys)
+    assert_array_almost_equal(interp_zs, quad(xs, ys))
+    (interp_dzsdx, interp_dzsdy) = cubic_user.gradient(x, y)
+    (dzsdx, dzsdy) = gradient_quad(x, y)
+    assert_array_almost_equal(interp_dzsdx, dzsdx)
+    assert_array_almost_equal(interp_dzsdy, dzsdy)
+
+    # Cubic improvement: cubic interpolation shall perform better than linear
+    # on a sufficiently dense mesh for a quadratic function.
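+    # (Rationale: for a smooth field the linear interpolation error scales
+    # as O(h**2) in the mesh size h, while the C1 cubic element converges
+    # at a higher order even with estimated nodal gradients, hence the
+    # factor-10 margin asserted below.)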
+ n = 11 + x, y = np.meshgrid(np.linspace(0., 1., n+1), np.linspace(0., 1., n+1)) + x = x.ravel() + y = y.ravel() + z = quad(x, y) + triang = mtri.Triangulation(x, y, triangles=meshgrid_triangles(n+1)) + xs, ys = np.meshgrid(np.linspace(0.1, 0.9, 5), np.linspace(0.1, 0.9, 5)) xs = xs.ravel() ys = ys.ravel() - zs = linear_interp(xs, ys) - assert_array_equal(zs.mask, [True]*16) + linear_interp = mtri.LinearTriInterpolator(triang, z) + cubic_min_E = mtri.CubicTriInterpolator(triang, z) + cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom') + zs = quad(xs, ys) + diff_lin = np.abs(linear_interp(xs, ys) - zs) + for interp in (cubic_min_E, cubic_geom): + diff_cubic = np.abs(interp(xs, ys) - zs) + assert(np.max(diff_lin) >= 10.*np.max(diff_cubic)) + assert(np.dot(diff_lin, diff_lin) >= + 100.*np.dot(diff_cubic, diff_cubic)) + + +def test_triinterpcubic_C1_continuity(): + # Below the 4 tests which demonstrate C1 continuity of the + # TriCubicInterpolator (testing the cubic shape functions on arbitrary + # triangle): + # + # 1) Testing continuity of function & derivatives at corner for all 9 + # shape functions. Testing also function values at same location. + # 2) Testing C1 continuity along each edge (as gradient is polynomial of + # 2nd order, it is sufficient to test at the middle). + # 3) Testing C1 continuity at triangle barycenter (where the 3 subtriangles + # meet) + # 4) Testing C1 continuity at median 1/3 points (midside between 2 + # subtriangles) + + # Utility test function check_continuity + def check_continuity(interpolator, loc, values=None): + """ + Checks the continuity of interpolator (and its derivatives) near + location loc. Can check the value at loc itself if *values* is + provided. + + *interpolator* TriInterpolator + *loc* location to test (x0, y0) + *values* (optional) array [z0, dzx0, dzy0] to check the value at *loc* + """ + n_star = 24 # Number of continuity points in a boundary of loc + epsilon = 1.e-10 # Distance for loc boundary + k = 100. # Continuity coefficient + (loc_x, loc_y) = loc + star_x = loc_x + epsilon*np.cos(np.linspace(0., 2*np.pi, n_star)) + star_y = loc_y + epsilon*np.sin(np.linspace(0., 2*np.pi, n_star)) + z = interpolator([loc_x], [loc_y])[0] + (dzx, dzy) = interpolator.gradient([loc_x], [loc_y]) + if values is not None: + assert_array_almost_equal(z, values[0]) + assert_array_almost_equal(dzx[0], values[1]) + assert_array_almost_equal(dzy[0], values[2]) + diff_z = interpolator(star_x, star_y) - z + (tab_dzx, tab_dzy) = interpolator.gradient(star_x, star_y) + diff_dzx = tab_dzx - dzx + diff_dzy = tab_dzy - dzy + assert_array_less(diff_z, epsilon*k) + assert_array_less(diff_dzx, epsilon*k) + assert_array_less(diff_dzy, epsilon*k) + + # Drawing arbitrary triangle (a, b, c) inside a unit square. 
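+    # (The reduced HCT element has 9 degrees of freedom per triangle: the
+    # function value and the 2 first derivatives at each of the 3 vertices.
+    # Each pass of the loop below activates exactly one of them.)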
+ (ax, ay) = (0.2, 0.3) + (bx, by) = (0.33367, 0.80755) + (cx, cy) = (0.669, 0.4335) + x = np.array([ax, bx, cx, 0., 1., 1., 0.]) + y = np.array([ay, by, cy, 0., 0., 1., 1.]) + triangles = np.array([[0, 1, 2], [3, 0, 4], [4, 0, 2], [4, 2, 5], + [1, 5, 2], [6, 5, 1], [6, 1, 0], [6, 0, 3]]) + triang = mtri.Triangulation(x, y, triangles) + + for idof in range(9): + z = np.zeros(7, dtype=np.float64) + dzx = np.zeros(7, dtype=np.float64) + dzy = np.zeros(7, dtype=np.float64) + values = np.zeros([3, 3], dtype=np.float64) + case = idof//3 + values[case, idof % 3] = 1.0 + if case == 0: + z[idof] = 1.0 + elif case == 1: + dzx[idof % 3] = 1.0 + elif case == 2: + dzy[idof % 3] = 1.0 + interp = mtri.CubicTriInterpolator(triang, z, kind='user', + dz=(dzx, dzy)) + # Test 1) Checking values and continuity at nodes + check_continuity(interp, (ax, ay), values[:, 0]) + check_continuity(interp, (bx, by), values[:, 1]) + check_continuity(interp, (cx, cy), values[:, 2]) + # Test 2) Checking continuity at midside nodes + check_continuity(interp, ((ax+bx)*0.5, (ay+by)*0.5)) + check_continuity(interp, ((ax+cx)*0.5, (ay+cy)*0.5)) + check_continuity(interp, ((cx+bx)*0.5, (cy+by)*0.5)) + # Test 3) Checking continuity at barycenter + check_continuity(interp, ((ax+bx+cx)/3., (ay+by+cy)/3.)) + # Test 4) Checking continuity at median 1/3-point + check_continuity(interp, ((4.*ax+bx+cx)/6., (4.*ay+by+cy)/6.)) + check_continuity(interp, ((ax+4.*bx+cx)/6., (ay+4.*by+cy)/6.)) + check_continuity(interp, ((ax+bx+4.*cx)/6., (ay+by+4.*cy)/6.)) + + +def test_triinterpcubic_cg_solver(): + # Now 3 basic tests of the Sparse CG solver, used for + # TriCubicInterpolator with *kind* = 'min_E' + # 1) A commonly used test involves a 2d Poisson matrix. + def poisson_sparse_matrix(n, m): + """ + Sparse Poisson matrix. + + Returns the sparse matrix in coo format resulting from the + discretisation of the 2-dimensional Poisson equation according to a + finite difference numerical scheme on a uniform (n, m) grid. + Size of the matrix: (n*m, n*m) + """ + l = m*n + rows = np.concatenate([ + np.arange(l, dtype=np.int32), + np.arange(l-1, dtype=np.int32), np.arange(1, l, dtype=np.int32), + np.arange(l-n, dtype=np.int32), np.arange(n, l, dtype=np.int32)]) + cols = np.concatenate([ + np.arange(l, dtype=np.int32), + np.arange(1, l, dtype=np.int32), np.arange(l-1, dtype=np.int32), + np.arange(n, l, dtype=np.int32), np.arange(l-n, dtype=np.int32)]) + vals = np.concatenate([ + 4*np.ones(l, dtype=np.float64), + -np.ones(l-1, dtype=np.float64), -np.ones(l-1, dtype=np.float64), + -np.ones(l-n, dtype=np.float64), -np.ones(l-n, dtype=np.float64)]) + # In fact +1 and -1 diags have some zeros + vals[l:2*l-1][m-1::m] = 0. + vals[2*l-1:3*l-2][m-1::m] = 0. + return vals, rows, cols, (n*m, n*m) + + # Instantiating a sparse Poisson matrix of size 48 x 48: + (n, m) = (12, 4) + mat = mtri.triinterpolate._Sparse_Matrix_coo(*poisson_sparse_matrix(n, m)) + mat.compress_csc() + mat_dense = mat.to_dense() + # Testing a sparse solve for all 48 basis vector + for itest in range(n*m): + b = np.zeros(n*m, dtype=np.float64) + b[itest] = 1. 
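+        # Solve A*x = e_itest with the sparse CG solver, then check the
+        # solution against the dense matrix-vector product.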
+ x, _ = mtri.triinterpolate._cg(A=mat, b=b, x0=np.zeros(n*m), + tol=1.e-10) + assert_array_almost_equal(np.dot(mat_dense, x), b) + + # 2) Same matrix with inserting 2 rows - cols with null diag terms + # (but still linked with the rest of the matrix by extra-diag terms) + (i_zero, j_zero) = (12, 49) + vals, rows, cols, _ = poisson_sparse_matrix(n, m) + rows = rows + 1*(rows >= i_zero) + 1*(rows >= j_zero) + cols = cols + 1*(cols >= i_zero) + 1*(cols >= j_zero) + # adding extra-diag terms + rows = np.concatenate([rows, [i_zero, i_zero-1, j_zero, j_zero-1]]) + cols = np.concatenate([cols, [i_zero-1, i_zero, j_zero-1, j_zero]]) + vals = np.concatenate([vals, [1., 1., 1., 1.]]) + mat = mtri.triinterpolate._Sparse_Matrix_coo(vals, rows, cols, + (n*m + 2, n*m + 2)) + mat.compress_csc() + mat_dense = mat.to_dense() + # Testing a sparse solve for all 50 basis vec + for itest in range(n*m + 2): + b = np.zeros(n*m + 2, dtype=np.float64) + b[itest] = 1. + x, _ = mtri.triinterpolate._cg(A=mat, b=b, x0=np.ones(n*m + 2), + tol=1.e-10) + assert_array_almost_equal(np.dot(mat_dense, x), b) + + # 3) Now a simple test that summation of duplicate (i.e. with same rows, + # same cols) entries occurs when compressed. + vals = np.ones(17, dtype=np.float64) + rows = np.array([0, 1, 2, 0, 0, 1, 1, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1], + dtype=np.int32) + cols = np.array([0, 1, 2, 1, 1, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2], + dtype=np.int32) + dim = (3, 3) + mat = mtri.triinterpolate._Sparse_Matrix_coo(vals, rows, cols, dim) + mat.compress_csc() + mat_dense = mat.to_dense() + assert_array_almost_equal(mat_dense, np.array([ + [1., 2., 0.], [2., 1., 5.], [0., 5., 1.]], dtype=np.float64)) + + +def test_triinterpcubic_geom_weights(): + # Tests to check computation of weights for _DOF_estimator_geom: + # The weight sum per triangle can be 1. (in case all angles < 90 degrees) + # or (2*w_i) where w_i = 1-alpha_i/np.pi is the weight of apex i ; alpha_i + # is the apex angle > 90 degrees. + (ax, ay) = (0., 1.687) + x = np.array([ax, 0.5*ax, 0., 1.]) + y = np.array([ay, -ay, 0., 0.]) + z = np.zeros(4, dtype=np.float64) + triangles = [[0, 2, 3], [1, 3, 2]] + sum_w = np.zeros([4, 2]) # 4 possibilities ; 2 triangles + for theta in np.linspace(0., 2*np.pi, 14): # rotating the figure... + x_rot = np.cos(theta)*x + np.sin(theta)*y + y_rot = -np.sin(theta)*x + np.cos(theta)*y + triang = mtri.Triangulation(x_rot, y_rot, triangles) + cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom') + dof_estimator = mtri.triinterpolate._DOF_estimator_geom(cubic_geom) + weights = dof_estimator.compute_geom_weights() + # Testing for the 4 possibilities... + sum_w[0, :] = np.sum(weights, 1) - 1 + for itri in range(3): + sum_w[itri+1, :] = np.sum(weights, 1) - 2*weights[:, itri] + assert_array_almost_equal(np.min(np.abs(sum_w), axis=0), + np.array([0., 0.], dtype=np.float64)) + + +def test_triinterp_colinear(): + # Tests interpolating inside a triangulation with horizontal colinear + # points (refer also to the tests :func:`test_trifinder` ). + # + # These are not valid triangulations, but we try to deal with the + # simplest violations (i. e. those handled by default TriFinder). + # + # Note that the LinearTriInterpolator and the CubicTriInterpolator with + # kind='min_E' or 'geom' still pass a linear patch test. + # We also test interpolation inside a flat triangle, by forcing + # *tri_index* in a call to :meth:`_interpolate_multikeys`. + + delta = 0. 
# If +ve, triangulation is OK, if -ve triangulation invalid, + # if zero have colinear points but should pass tests anyway. + x0 = np.array([1.5, 0, 1, 2, 3, 1.5, 1.5]) + y0 = np.array([-1, 0, 0, 0, 0, delta, 1]) + + # We test different affine transformations of the initial figure ; to + # avoid issues related to round-off errors we only use integer + # coefficients (otherwise the Triangulation might become invalid even with + # delta == 0). + transformations = [[1, 0], [0, 1], [1, 1], [3, 7], [-5, -2], [-3, 2]] + for transformation in transformations: + x_rot = transformation[0]*x0 + transformation[1]*y0 + y_rot = -transformation[1]*x0 + transformation[0]*y0 + (x, y) = (x_rot, y_rot) + z = 1.23*x - 4.79*y + triangles = [[0, 2, 1], [0, 3, 2], [0, 4, 3], [1, 2, 5], [2, 3, 5], + [3, 4, 5], [1, 5, 6], [4, 6, 5]] + triang = mtri.Triangulation(x, y, triangles) + xs = np.linspace(np.min(triang.x), np.max(triang.x), 20) + ys = np.linspace(np.min(triang.y), np.max(triang.y), 20) + xs, ys = np.meshgrid(xs, ys) + xs = xs.ravel() + ys = ys.ravel() + mask_out = (triang.get_trifinder()(xs, ys) == -1) + zs_target = np.ma.array(1.23*xs - 4.79*ys, mask=mask_out) + + linear_interp = mtri.LinearTriInterpolator(triang, z) + cubic_min_E = mtri.CubicTriInterpolator(triang, z) + cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom') + + for interp in (linear_interp, cubic_min_E, cubic_geom): + zs = interp(xs, ys) + assert_array_almost_equal(zs_target, zs) + + # Testing interpolation inside the flat triangle number 4: [2, 3, 5] + # by imposing *tri_index* in a call to :meth:`_interpolate_multikeys` + itri = 4 + pt1 = triang.triangles[itri, 0] + pt2 = triang.triangles[itri, 1] + xs = np.linspace(triang.x[pt1], triang.x[pt2], 10) + ys = np.linspace(triang.y[pt1], triang.y[pt2], 10) + zs_target = 1.23*xs - 4.79*ys + for interp in (linear_interp, cubic_min_E, cubic_geom): + zs, = interp._interpolate_multikeys( + xs, ys, tri_index=itri*np.ones(10, dtype=np.int32)) + assert_array_almost_equal(zs_target, zs) + + +def test_triinterp_transformations(): + # 1) Testing that the interpolation scheme is invariant by rotation of the + # whole figure. + # Note: This test is non-trivial for a CubicTriInterpolator with + # kind='min_E'. It does fail for a non-isotropic stiffness matrix E of + # :class:`_ReducedHCT_Element` (tested with E=np.diag([1., 1., 1.])), and + # provides a good test for :meth:`get_Kff_and_Ff`of the same class. + # + # 2) Also testing that the interpolation scheme is invariant by expansion + # of the whole figure along one axis. + n_angles = 20 + n_radii = 10 + min_radius = 0.15 + + def z(x, y): + r1 = np.sqrt((0.5-x)**2 + (0.5-y)**2) + theta1 = np.arctan2(0.5-x, 0.5-y) + r2 = np.sqrt((-x-0.2)**2 + (-y-0.2)**2) + theta2 = np.arctan2(-x-0.2, -y-0.2) + z = -(2*(np.exp((r1/10)**2)-1)*30. * np.cos(7.*theta1) + + (np.exp((r2/10)**2)-1)*30. * np.cos(11.*theta2) + + 0.7*(x**2 + y**2)) + return (np.max(z)-z)/(np.max(z)-np.min(z)) + + # First create the x and y coordinates of the points. 
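+    # (The angular origin is offset by n_angles radians below; modulo 2*pi
+    # this merely rotates the whole point set and is harmless.)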
+ radii = np.linspace(min_radius, 0.95, n_radii) + angles = np.linspace(0 + n_angles, 2*np.pi + n_angles, + n_angles, endpoint=False) + angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1) + angles[:, 1::2] += np.pi/n_angles + x0 = (radii*np.cos(angles)).flatten() + y0 = (radii*np.sin(angles)).flatten() + triang0 = mtri.Triangulation(x0, y0) # Delaunay triangulation + z0 = z(x0, y0) + + # Then create the test points + xs0 = np.linspace(-1., 1., 23) + ys0 = np.linspace(-1., 1., 23) + xs0, ys0 = np.meshgrid(xs0, ys0) + xs0 = xs0.ravel() + ys0 = ys0.ravel() + + interp_z0 = {} + for i_angle in range(2): + # Rotating everything + theta = 2*np.pi / n_angles * i_angle + x = np.cos(theta)*x0 + np.sin(theta)*y0 + y = -np.sin(theta)*x0 + np.cos(theta)*y0 + xs = np.cos(theta)*xs0 + np.sin(theta)*ys0 + ys = -np.sin(theta)*xs0 + np.cos(theta)*ys0 + triang = mtri.Triangulation(x, y, triang0.triangles) + linear_interp = mtri.LinearTriInterpolator(triang, z0) + cubic_min_E = mtri.CubicTriInterpolator(triang, z0) + cubic_geom = mtri.CubicTriInterpolator(triang, z0, kind='geom') + dic_interp = {'lin': linear_interp, + 'min_E': cubic_min_E, + 'geom': cubic_geom} + # Testing that the interpolation is invariant by rotation... + for interp_key in ['lin', 'min_E', 'geom']: + interp = dic_interp[interp_key] + if i_angle == 0: + interp_z0[interp_key] = interp(xs0, ys0) # storage + else: + interpz = interp(xs, ys) + assert_array_almost_equal(interpz, interp_z0[interp_key]) + + scale_factor = 987654.3210 + for scaled_axis in ('x', 'y'): + # Scaling everything (expansion along scaled_axis) + if scaled_axis == 'x': + x = scale_factor * x0 + y = y0 + xs = scale_factor * xs0 + ys = ys0 + else: + x = x0 + y = scale_factor * y0 + xs = xs0 + ys = scale_factor * ys0 + triang = mtri.Triangulation(x, y, triang0.triangles) + linear_interp = mtri.LinearTriInterpolator(triang, z0) + cubic_min_E = mtri.CubicTriInterpolator(triang, z0) + cubic_geom = mtri.CubicTriInterpolator(triang, z0, kind='geom') + dic_interp = {'lin': linear_interp, + 'min_E': cubic_min_E, + 'geom': cubic_geom} + # Testing that the interpolation is invariant by expansion along + # 1 axis... + for interp_key in ['lin', 'min_E', 'geom']: + interpz = dic_interp[interp_key](xs, ys) + assert_array_almost_equal(interpz, interp_z0[interp_key]) + + +@image_comparison(baseline_images=['tri_smooth_contouring'], + extensions=['png'], remove_text=True) +def test_tri_smooth_contouring(): + # Image comparison based on example tricontour_smooth_user. + n_angles = 20 + n_radii = 10 + min_radius = 0.15 + + def z(x, y): + r1 = np.sqrt((0.5-x)**2 + (0.5-y)**2) + theta1 = np.arctan2(0.5-x, 0.5-y) + r2 = np.sqrt((-x-0.2)**2 + (-y-0.2)**2) + theta2 = np.arctan2(-x-0.2, -y-0.2) + z = -(2*(np.exp((r1/10)**2)-1)*30. * np.cos(7.*theta1) + + (np.exp((r2/10)**2)-1)*30. * np.cos(11.*theta2) + + 0.7*(x**2 + y**2)) + return (np.max(z)-z)/(np.max(z)-np.min(z)) + + # First create the x and y coordinates of the points. 
+ radii = np.linspace(min_radius, 0.95, n_radii) + angles = np.linspace(0 + n_angles, 2*np.pi + n_angles, + n_angles, endpoint=False) + angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1) + angles[:, 1::2] += np.pi/n_angles + x0 = (radii*np.cos(angles)).flatten() + y0 = (radii*np.sin(angles)).flatten() + triang0 = mtri.Triangulation(x0, y0) # Delaunay triangulation + z0 = z(x0, y0) + xmid = x0[triang0.triangles].mean(axis=1) + ymid = y0[triang0.triangles].mean(axis=1) + mask = np.where(xmid*xmid + ymid*ymid < min_radius*min_radius, 1, 0) + triang0.set_mask(mask) + + # Then the plot + refiner = mtri.UniformTriRefiner(triang0) + tri_refi, z_test_refi = refiner.refine_field(z0, subdiv=4) + levels = np.arange(0., 1., 0.025) + plt.triplot(triang0, lw=0.5, color='0.5') + plt.tricontour(tri_refi, z_test_refi, levels=levels, colors="black") + + +@image_comparison(baseline_images=['tri_smooth_gradient'], + extensions=['png'], remove_text=True) +def test_tri_smooth_gradient(): + # Image comparison based on example trigradient_demo. + + def dipole_potential(x, y): + """ An electric dipole potential V """ + r_sq = x**2 + y**2 + theta = np.arctan2(y, x) + z = np.cos(theta)/r_sq + return (np.max(z)-z) / (np.max(z)-np.min(z)) + + # Creating a Triangulation + n_angles = 30 + n_radii = 10 + min_radius = 0.2 + radii = np.linspace(min_radius, 0.95, n_radii) + angles = np.linspace(0, 2*np.pi, n_angles, endpoint=False) + angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1) + angles[:, 1::2] += np.pi/n_angles + x = (radii*np.cos(angles)).flatten() + y = (radii*np.sin(angles)).flatten() + V = dipole_potential(x, y) + triang = mtri.Triangulation(x, y) + xmid = x[triang.triangles].mean(axis=1) + ymid = y[triang.triangles].mean(axis=1) + mask = np.where(xmid*xmid + ymid*ymid < min_radius*min_radius, 1, 0) + triang.set_mask(mask) + + # Refine data - interpolates the electrical potential V + refiner = mtri.UniformTriRefiner(triang) + tri_refi, z_test_refi = refiner.refine_field(V, subdiv=3) + + # Computes the electrical field (Ex, Ey) as gradient of -V + tci = mtri.CubicTriInterpolator(triang, -V) + (Ex, Ey) = tci.gradient(triang.x, triang.y) + E_norm = np.sqrt(Ex**2 + Ey**2) + + # Plot the triangulation, the potential iso-contours and the vector field + plt.figure() + plt.gca().set_aspect('equal') + plt.triplot(triang, color='0.8') + + levels = np.arange(0., 1., 0.01) + cmap = cm.get_cmap(name='hot', lut=None) + plt.tricontour(tri_refi, z_test_refi, levels=levels, cmap=cmap, + linewidths=[2.0, 1.0, 1.0, 1.0]) + # Plots direction of the electrical vector field + plt.quiver(triang.x, triang.y, Ex/E_norm, Ey/E_norm, + units='xy', scale=10., zorder=3, color='blue', + width=0.007, headwidth=3., headlength=4.) + plt.show() + + +def test_tritools(): + # Tests TriAnalyzer.scale_factors on masked triangulation + # Tests circle_ratios on equilateral and right-angled triangle. 
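+    # (Expected circle ratios: 0.5 for the equilateral triangle,
+    # 1/(1 + sqrt(2)) for the right-angled isosceles one, and NaN for the
+    # masked third triangle.)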
+ x = np.array([0., 1., 0.5, 0., 2.]) + y = np.array([0., 0., 0.5*np.sqrt(3.), -1., 1.]) + triangles = np.array([[0, 1, 2], [0, 1, 3], [1, 2, 4]], dtype=np.int32) + mask = np.array([False, False, True], dtype=np.bool) + triang = mtri.Triangulation(x, y, triangles, mask=mask) + analyser = mtri.TriAnalyzer(triang) + assert_array_almost_equal(analyser.scale_factors, + np.array([1., 1./(1.+0.5*np.sqrt(3.))])) + assert_array_almost_equal( + analyser.circle_ratios(rescale=False), + np.ma.masked_array([0.5, 1./(1.+np.sqrt(2.)), np.nan], mask)) + + # Tests circle ratio of a flat triangle + x = np.array([0., 1., 2.]) + y = np.array([1., 1.+3., 1.+6.]) + triangles = np.array([[0, 1, 2]], dtype=np.int32) + triang = mtri.Triangulation(x, y, triangles) + analyser = mtri.TriAnalyzer(triang) + assert_array_almost_equal(analyser.circle_ratios(), np.array([0.])) + + # Tests TriAnalyzer.get_flat_tri_mask + # Creates a triangulation of [-1, 1] x [-1, 1] with contiguous groups of + # 'flat' triangles at the 4 corners and at the center. Checks that only + # those at the borders are eliminated by TriAnalyzer.get_flat_tri_mask + n = 9 + + def power(x, a): + return np.abs(x)**a*np.sign(x) + + x = np.linspace(-1., 1., n+1) + x, y = np.meshgrid(power(x, 2.), power(x, 0.25)) + x = x.ravel() + y = y.ravel() + + triang = mtri.Triangulation(x, y, triangles=meshgrid_triangles(n+1)) + analyser = mtri.TriAnalyzer(triang) + mask_flat = analyser.get_flat_tri_mask(0.2) + verif_mask = np.zeros(162, dtype=np.bool) + corners_index = [0, 1, 2, 3, 14, 15, 16, 17, 18, 19, 34, 35, 126, 127, + 142, 143, 144, 145, 146, 147, 158, 159, 160, 161] + verif_mask[corners_index] = True + assert_array_equal(mask_flat, verif_mask) + + # Now including a hole (masked triangle) at the center. The center also + # shall be eliminated by get_flat_tri_mask. + mask = np.zeros(162, dtype=np.bool) + mask[80] = True + triang.set_mask(mask) + mask_flat = analyser.get_flat_tri_mask(0.2) + center_index = [44, 45, 62, 63, 78, 79, 80, 81, 82, 83, 98, 99, 116, 117] + verif_mask[center_index] = True + assert_array_equal(mask_flat, verif_mask) + + +def test_trirefine(): + # test subdiv=2 refinement + n = 3 + subdiv = 2 + x = np.linspace(-1., 1., n+1) + x, y = np.meshgrid(x, x) + x = x.ravel() + y = y.ravel() + mask = np.zeros(2*n**2, dtype=np.bool) + mask[n**2:] = True + triang = mtri.Triangulation(x, y, triangles=meshgrid_triangles(n+1), + mask=mask) + refiner = mtri.UniformTriRefiner(triang) + refi_triang = refiner.refine_triangulation(subdiv=subdiv) + x_refi = refi_triang.x + y_refi = refi_triang.y + + n_refi = n * subdiv**2 + x_verif = np.linspace(-1., 1., n_refi+1) + x_verif, y_verif = np.meshgrid(x_verif, x_verif) + x_verif = x_verif.ravel() + y_verif = y_verif.ravel() + ind1d = np.in1d(np.around(x_verif*(2.5+y_verif), 8), + np.around(x_refi*(2.5+y_refi), 8)) + assert_array_equal(ind1d, True) + + # tests the mask of the refined triangulation + refi_mask = refi_triang.mask + refi_tri_barycenter_x = np.sum(refi_triang.x[refi_triang.triangles], + axis=1)/3. + refi_tri_barycenter_y = np.sum(refi_triang.y[refi_triang.triangles], + axis=1)/3. + tri_finder = triang.get_trifinder() + refi_tri_indices = tri_finder(refi_tri_barycenter_x, + refi_tri_barycenter_y) + refi_tri_mask = triang.mask[refi_tri_indices] + assert_array_equal(refi_mask, refi_tri_mask) + + +def meshgrid_triangles(n): + """ + Utility function. 
+ Returns triangles to mesh a np.meshgrid of n x n points + """ + tri = [] + for i in range(n-1): + for j in range(n-1): + a = i + j*(n) + b = (i+1) + j*n + c = i + (j+1)*n + d = (i+1) + (j+1)*n + tri += [[a, b, d], [a, d, c]] + return np.array(tri, dtype=np.int32) diff --git a/lib/matplotlib/tri/__init__.py b/lib/matplotlib/tri/__init__.py index 5624c30932de..6cf9592f040a 100644 --- a/lib/matplotlib/tri/__init__.py +++ b/lib/matplotlib/tri/__init__.py @@ -1,11 +1,12 @@ """ Unstructured triangular grid functions. """ - from __future__ import print_function from triangulation import * from tricontour import * +from tritools import * from trifinder import * from triinterpolate import * +from trirefine import * from tripcolor import * from triplot import * diff --git a/lib/matplotlib/tri/_tri.cpp b/lib/matplotlib/tri/_tri.cpp index 379e72870af0..9dd538a9c19f 100644 --- a/lib/matplotlib/tri/_tri.cpp +++ b/lib/matplotlib/tri/_tri.cpp @@ -426,50 +426,32 @@ Py::Object Triangulation::calculate_plane_coefficients(const Py::Tuple &args) // p/normal_z XYZ point0(xs[*tris], ys[*tris], zs[*tris]); tris++; - XYZ point1(xs[*tris], ys[*tris], zs[*tris]); + XYZ side01 = XYZ(xs[*tris], ys[*tris], zs[*tris]) - point0; tris++; - XYZ point2(xs[*tris], ys[*tris], zs[*tris]); + XYZ side02 = XYZ(xs[*tris], ys[*tris], zs[*tris]) - point0; tris++; - XYZ normal = (point1 - point0).cross(point2 - point0); + XYZ normal = side01.cross(side02); if (normal.z == 0.0) { // Normal is in x-y plane which means triangle consists of - // colinear points. Try to do the best we can by taking - // plane through longest side of triangle. - double length_sqr_01 = (point1 - point0).length_squared(); - double length_sqr_12 = (point2 - point1).length_squared(); - double length_sqr_20 = (point0 - point2).length_squared(); - if (length_sqr_01 > length_sqr_12) - { - if (length_sqr_01 > length_sqr_20) - normal = normal.cross(point1 - point0); - else - normal = normal.cross(point0 - point2); - } - else - { - if (length_sqr_12 > length_sqr_20) - normal = normal.cross(point2 - point1); - else - normal = normal.cross(point0 - point2); - } - - if (normal.z == 0.0) - { - // The 3 triangle points have identical x and y! The - // best we can do here is take normal = (0,0,1) and for - // the constant p take the mean of the 3 points' - // z-values. - normal = XYZ(0.0, 0.0, 1.0); - point0.z = (point0.z + point1.z + point2.z) / 3.0; - } + // colinear points. To avoid dividing by zero, we use the + // Moore-Penrose pseudo-inverse. + double sum2 = (side01.x*side01.x + side01.y*side01.y + + side02.x*side02.x + side02.y*side02.y); + double a = (side01.x*side01.z + side02.x*side02.z) / sum2; + double b = (side01.y*side01.z + side02.y*side02.z) / sum2; + *planes++ = a; + *planes++ = b; + *planes++ = point0.z - a*point0.x - b*point0.y; + } + else + { + *planes++ = -normal.x / normal.z; // x + *planes++ = -normal.y / normal.z; // y + *planes++ = normal.dot(point0) / normal.z; // constant } - - *planes++ = -normal.x / normal.z; // x - *planes++ = -normal.y / normal.z; // y - *planes++ = normal.dot(point0) / normal.z; // constant } } } diff --git a/lib/matplotlib/tri/triinterpolate.py b/lib/matplotlib/tri/triinterpolate.py index b14c91ab5894..7113c0de7ea0 100644 --- a/lib/matplotlib/tri/triinterpolate.py +++ b/lib/matplotlib/tri/triinterpolate.py @@ -1,7 +1,14 @@ +""" +Interpolation inside triangular grids. 
+""" from __future__ import print_function from matplotlib.tri import Triangulation from matplotlib.tri.trifinder import TriFinder +from matplotlib.tri.tritools import TriAnalyzer import numpy as np +import warnings + +__all__ = ('TriInterpolator', 'LinearTriInterpolator', 'CubicTriInterpolator') class TriInterpolator(object): @@ -9,24 +16,222 @@ class TriInterpolator(object): Abstract base class for classes used to perform interpolation on triangular grids. - Derived classes implement __call__(x,y) where x,y are array_like point - coordinates of the same shape, and that returns a masked array of the same - shape containing the interpolated z-values. + Derived classes implement the following methods: + + - ``__call__(x, y)`` , + where x, y are array_like point coordinates of the same shape, and + that returns a masked array of the same shape containing the + interpolated z-values. + + - ``gradient(x, y)`` , + where x, y are array_like point coordinates of the same + shape, and that returns a list of 2 masked arrays of the same shape + containing the 2 derivatives of the interpolator (derivatives of + interpolated z values with respect to x and y). + """ def __init__(self, triangulation, z, trifinder=None): if not isinstance(triangulation, Triangulation): - raise ValueError('Expected a Triangulation object') + raise ValueError("Expected a Triangulation object") self._triangulation = triangulation self._z = np.asarray(z) if self._z.shape != self._triangulation.x.shape: - raise ValueError('z array must have same length as triangulation x' - ' and y arrays') + raise ValueError("z array must have same length as triangulation x" + " and y arrays") if trifinder is not None and not isinstance(trifinder, TriFinder): - raise ValueError('Expected a TriFinder object') + raise ValueError("Expected a TriFinder object") self._trifinder = trifinder or self._triangulation.get_trifinder() + # Default scaling factors : 1.0 (= no scaling) + # Scaling may be used for interpolations for which the order of + # magnitude of x, y has an impact on the interpolant definition. + # Please refer to :meth:`_interpolate_multikeys` for details. + self._unit_x = 1.0 + self._unit_y = 1.0 + + # Default triangle renumbering: None (= no renumbering) + # Renumbering may be used to avoid unecessary computations + # if complex calculations are done inside the Interpolator. + # Please refer to :meth:`_interpolate_multikeys` for details. + self._tri_renum = None + + # __call__ and gradient docstrings are shared by all subclasses + # (except, if needed, relevant additions). + # However these methods are only implemented in subclasses to avoid + # confusion in the documentation. + docstring__call__ = """ + Returns a masked array containing interpolated values at the specified + x,y points. + + Parameters + ---------- + x, y : array-like + x and y coordinates of the same shape and any number of + dimensions. + + Returns + ------- + z : np.ma.array + Masked array of the same shape as *x* and *y* ; values + corresponding to (*x*, *y*) points outside of the triangulation + are masked out. + + """ + + docstringgradient = """ + Returns a list of 2 masked arrays containing interpolated derivatives + at the specified x,y points. + + Parameters + ---------- + x, y : array-like + x and y coordinates of the same shape and any number of + dimensions. + + Returns + ------- + dzdx, dzdy : np.ma.array + 2 masked arrays of the same shape as *x* and *y* ; values + corresponding to (x,y) points outside of the triangulation + are masked out. 
+ The first returned array contains the values of + :math:`\\frac{\\partial z}{\\partial x}` and the second those of + :math:`\\frac{\\partial z}{\\partial y}`. + + """ + + def _interpolate_multikeys(self, x, y, tri_index=None, + return_keys=('z',)): + """ + Versatile (private) method defined for all TriInterpolators. + + :meth:`_interpolate_multikeys` is a wrapper around method + :meth:`_interpolate_single_key` (to be defined in the child + subclasses). + :meth:`_interpolate_single_key actually performs the interpolation, + but only for 1-dimensional inputs and at valid locations (inside + unmasked triangles of the triangulation). + + The purpose of :meth:`_interpolate_multikeys` is to implement the + following common tasks needed in all subclasses implementations: + + - calculation of containing triangles + - dealing with more than one interpolation request at the same + location (e.g. if the 2 derivatives are requested, it is + unnecessary to compute the containing triangles twice) + - scaling according to self._unit_x, self._unit_y + - dealing with points outside of the grid (with fill value np.nan) + - dealing with multi-dimensionnal *x*, *y* arrays: flattening for + :meth:`_interpolate_params` call and final reshaping. + + (Note that np.vectorize could do most of those things very well for + you, but it does it by function evaluations over successive tuples of + the input arrays. Therefore, this tends to be more time consuming than + using optimized numpy functions - e.g. np.dot - which can be used + easily on the flattened inputs, in the child-subclass methods + :meth:`_interpolate_single_key`.) + + It is guaranteed that the calls to :meth:`_interpolate_single_key` + will be done with flattened (1-d) array_like input parameters `x`, `y` + and with flattened, valid `tri_index` arrays (no -1 index allowed). + + Parameters + ---------- + x, y : array_like + x and y coordinates indicating where interpolated values are + requested. + tri_index : integer array_like, optional + Array of the containing triangle indices, same shape as + *x* and *y*. Defaults to None. If None, these indices + will be computed by a TriFinder instance. + (Note: For point outside the grid, tri_index[ipt] shall be -1). + return_keys : tuple of keys from {'z', 'dzdx', 'dzdy'} + Defines the interpolation arrays to return, and in which order. + + Returns + ------- + ret : list of arrays + Each array-like contains the expected interpolated values in the + order defined by *return_keys* parameter. + """ + # Flattening and rescaling inputs arrays x, y + # (initial shape is stored for output) + x = np.asarray(x, dtype=np.float64) + y = np.asarray(y, dtype=np.float64) + sh_ret = x.shape + if (x.shape != y.shape): + raise ValueError("x and y shall have same shapes." + " Given: {0} and {1}".format(x.shape, y.shape)) + x = np.ravel(x) + y = np.ravel(y) + x_scaled = x/self._unit_x + y_scaled = y/self._unit_y + size_ret = np.size(x_scaled) + + # Computes & ravels the element indexes, extract the valid ones. + if tri_index is None: + tri_index = self._trifinder(x, y) + else: + if (tri_index.shape != sh_ret): + raise ValueError( + "tri_index array is provided and shall" + " have same shape as x and y. 
Given: " + "{0} and {1}".format(tri_index.shape, sh_ret)) + tri_index = np.ravel(tri_index) + + mask_in = (tri_index != -1) + if self._tri_renum is None: + valid_tri_index = tri_index[mask_in] + else: + valid_tri_index = self._tri_renum[tri_index[mask_in]] + valid_x = x_scaled[mask_in] + valid_y = y_scaled[mask_in] + + ret = [] + for return_key in return_keys: + # Find the return index associated with the key. + try: + return_index = {'z': 0, 'dzdx': 1, 'dzdy': 2}[return_key] + except KeyError: + raise ValueError("return_keys items shall take values in" + " {'z', 'dzdx', 'dzdy'}") + + # Sets the scale factor for f & df components + scale = [1., 1./self._unit_x, 1./self._unit_y][return_index] + + # Computes the interpolation + ret_loc = np.empty(size_ret, dtype=np.float64) + ret_loc[~mask_in] = np.nan + ret_loc[mask_in] = self._interpolate_single_key( + return_key, valid_tri_index, valid_x, valid_y) * scale + ret += [np.ma.masked_invalid(ret_loc.reshape(sh_ret), copy=False)] + + return ret + + def _interpolate_single_key(self, return_key, tri_index, x, y): + """ + Performs the interpolation at points belonging to the triangulation + (inside an unmasked triangles). + + Parameters + ---------- + return_index : string key from {'z', 'dzdx', 'dzdy'} + Identifies the requested values (z or its derivatives) + tri_index : 1d integer array + Valid triangle index (-1 prohibited) + x, y : 1d arrays, same shape as `tri_index` + Valid locations where interpolation is requested. + + Returns + ------- + ret : 1-d array + Returned array of the same size as *tri_index* + """ + raise NotImplementedError("TriInterpolator subclasses" + + "should implement _interpolate_single_key!") + class LinearTriInterpolator(TriInterpolator): """ @@ -36,66 +241,1394 @@ class LinearTriInterpolator(TriInterpolator): point (x,y) lies on the plane of the triangle containing (x,y). Interpolated values are therefore continuous across the triangulation, but their first derivatives are discontinuous at edges between triangles. - """ - def __init__(self, triangulation, z, trifinder=None): - """ - *triangulation*: the :class:`~matplotlib.tri.Triangulation` to - interpolate over. - *z*: array_like of shape (npoints). - Array of values, defined at grid points, to interpolate between. - - *trifinder*: optional :class:`~matplotlib.tri.TriFinder` object. + Parameters + ---------- + triangulation : :class:`~matplotlib.tri.Triangulation` object + The triangulation to interpolate over. + z : array_like of shape (npoints,) + Array of values, defined at grid points, to interpolate between. + trifinder : :class:`~matplotlib.tri.TriFinder` object, optional If this is not specified, the Triangulation's default TriFinder will - be used by calling :func:`matplotlib.tri.Triangulation.get_trifinder`. - """ + be used by calling + :func:`matplotlib.tri.Triangulation.get_trifinder`. + + Methods + ------- + `__call__` (x, y) : Returns interpolated values at x,y points + `gradient` (x, y) : Returns interpolated derivatives at x,y points + + """ + def __init__(self, triangulation, z, trifinder=None): TriInterpolator.__init__(self, triangulation, z, trifinder) # Store plane coefficients for fast interpolation calculations. self._plane_coefficients = \ self._triangulation.calculate_plane_coefficients(self._z) - # Store vectorized interpolation function, so can pass in arbitrarily - # shape arrays of x, y and tri and the _single_interp function is - # called in turn with scalar x, y and tri. 
- self._multi_interp = np.vectorize(self._single_interp, - otypes=[np.float]) + def __call__(self, x, y): + return self._interpolate_multikeys(x, y, tri_index=None, + return_keys=('z',))[0] + __call__.__doc__ = TriInterpolator.docstring__call__ + + def gradient(self, x, y): + return self._interpolate_multikeys(x, y, tri_index=None, + return_keys=('dzdx', 'dzdy')) + gradient.__doc__ = TriInterpolator.docstringgradient + + def _interpolate_single_key(self, return_key, tri_index, x, y): + if return_key == 'z': + return (self._plane_coefficients[tri_index, 0]*x + + self._plane_coefficients[tri_index, 1]*y + + self._plane_coefficients[tri_index, 2]) + elif return_key == 'dzdx': + return self._plane_coefficients[tri_index, 0] + elif return_key == 'dzdy': + return self._plane_coefficients[tri_index, 1] + else: + raise ValueError("Invalid return_key: "+return_key) + + +class CubicTriInterpolator(TriInterpolator): + """ + A CubicTriInterpolator performs cubic interpolation on triangular grids. + + In one-dimension - on a segment - a cubic interpolating function is + defined by the values of the function and its derivative at both ends. + This is almost the same in 2-d inside a triangle, except that the values + of the function and its 2 derivatives have to be defined at each triangle + node. + + The CubicTriInterpolator takes the value of the function at each node - + provided by the user - and internally computes the value of the + derivatives, resulting in a smooth interpolation. + (As a special feature, the user can also impose the value of the + derivatives at each node, but this is not supposed to be the common + usage.) + + Parameters + ---------- + triangulation : :class:`~matplotlib.tri.Triangulation` object + The triangulation to interpolate over. + z : array_like of shape (npoints,) + Array of values, defined at grid points, to interpolate between. + kind : {'min_E', 'geom', 'user'}, optional + Choice of the smoothing algorithm, in order to compute + the interpolant derivatives (defaults to 'min_E'): + + - if 'min_E': (default) The derivatives at each node is computed + to minimize a bending energy. + - if 'geom': The derivatives at each node is computed as a + weighted average of relevant triangle normals. To be used for + speed optimization (large grids). + - if 'user': The user provides the argument `dz`, no computation + is hence needed. + + trifinder : :class:`~matplotlib.tri.TriFinder` object, optional + If not specified, the Triangulation's default TriFinder will + be used by calling + :func:`matplotlib.tri.Triangulation.get_trifinder`. + dz : tuple of array_likes (dzdx, dzdy), optional + Used only if *kind* ='user'. In this case *dz* must be provided as + (dzdx, dzdy) where dzdx, dzdy are arrays of the same shape as *z* and + are the interpolant first derivatives at the *triangulation* points. + + Methods + ------- + `__call__` (x, y) : Returns interpolated values at x,y points + `gradient` (x, y) : Returns interpolated derivatives at x,y points + + Notes + ----- + This note is a bit technical and details the way a + :class:`~matplotlib.tri.CubicTriInterpolator` computes a cubic + interpolation. + + The interpolation is based on a Clough-Tocher subdivision scheme of + the *triangulation* mesh (to make it clearer, each triangle of the + grid will be divided in 3 child-triangles, and on each child triangle + the interpolated function is a cubic polynomial of the 2 coordinates). 
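+    (Expanded in the barycentric coordinates of a sub-triangle, such a
+    cubic polynomial reads
+    :math:`z = \\sum_{i+j+k=3} c_{ijk} \\,
+    \\alpha_1^i \\alpha_2^j \\alpha_3^k`,
+    i.e. a combination of the 10 degree-3 monomials assembled below in
+    :meth:`_ReducedHCT_Element.get_function_values`.)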
+    This technique originates from FEM (Finite Element Method) analysis;
+    the element used is a reduced Hsieh-Clough-Tocher (HCT)
+    element. Its shape functions are described in [1]_.
+    The assembled function is guaranteed to be C1-smooth, i.e. it is
+    continuous and its first derivatives are also continuous (this
+    is easy to show inside the triangles but is also true when crossing the
+    edges).
+
+    In the default case (*kind* ='min_E'), the interpolant minimizes a
+    curvature energy on the functional space generated by the HCT element
+    shape functions - with imposed values but arbitrary derivatives at each
+    node. The minimized functional is the integral of the so-called total
+    curvature (implementation based on an algorithm from [2]_ - PCG sparse
+    solver):
+
+        .. math::
+
+            E(z) = \\frac{1}{2} \\int_{\\Omega} \\left(
+                \\left( \\frac{\\partial^2{z}}{\\partial{x}^2} \\right)^2 +
+                \\left( \\frac{\\partial^2{z}}{\\partial{y}^2} \\right)^2 +
+                2\\left( \\frac{\\partial^2{z}}{\\partial{y}\\partial{x}}
+                \\right)^2 \\right) dx\\,dy
+
+    If *kind* ='geom' is chosen by the user, a simple geometric
+    approximation is used (weighted average of the triangle normal
+    vectors), which could improve speed on very large grids.
+
+    References
+    ----------
+    .. [1] Michel Bernadou, Kamal Hassan, "Basis functions for general
+        Hsieh-Clough-Tocher triangles, complete or reduced.",
+        International Journal for Numerical Methods in Engineering,
+        17(5):784 - 789. 1981.
+    .. [2] C.T. Kelley, "Iterative Methods for Optimization".
+
+    """
+    def __init__(self, triangulation, z, kind='min_E', trifinder=None,
+                 dz=None):
+        TriInterpolator.__init__(self, triangulation, z, trifinder)
+
+        # Loads the underlying C++ _triangulation.
+        # (During loading, reordering of triangulation._triangles may occur
+        # so that all final triangles are now anti-clockwise.)
+        self._triangulation.get_cpp_triangulation()
+
+        # To build the stiffness matrix and avoid zero-energy spurious modes
+        # we will only store internally the valid (unmasked) triangles and
+        # the coordinates of the points actually used.
+        # 2 renumbering tables need to be computed and stored:
+        #    - a triangle renum table in order to translate the result from a
+        #      TriFinder instance into the internally stored triangle number.
+        #    - a node renum table to renumber the self._z values into the new
+        #      (used) node numbering.
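+        # (For instance, on a 3-triangle mesh with triangle 1 masked, the
+        # triangle renum table would read [0, -1, 1]: a TriFinder result of
+        # 2 translates to internal triangle 1. The node renum table plays
+        # the same role for the point indices, with -1 marking unused
+        # points.)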
+ tri_analyzer = TriAnalyzer(self._triangulation) + (compressed_triangles, compressed_x, compressed_y, tri_renum, + node_renum) = tri_analyzer._get_compressed_triangulation(True, True) + self._triangles = compressed_triangles + self._tri_renum = tri_renum + # Taking into account the node renumbering in self._z: + node_mask = (node_renum == -1) + self._z[node_renum[~node_mask]] = self._z + self._z = self._z[~node_mask] + + # Computing scale factors + self._unit_x = np.max(compressed_x) - np.min(compressed_x) + self._unit_y = np.max(compressed_y) - np.min(compressed_y) + self._pts = np.vstack((compressed_x/float(self._unit_x), + compressed_y/float(self._unit_y))).T + # Computing triangle points + self._tris_pts = self._pts[self._triangles] + # Computing eccentricities + self._eccs = self._compute_tri_eccentricities(self._tris_pts) + # Computing dof estimations for HCT triangle shape function + self._dof = self._compute_dof(kind, dz=dz) + # Loading HCT element + self._ReferenceElement = _ReducedHCT_Element() def __call__(self, x, y): + return self._interpolate_multikeys(x, y, tri_index=None, + return_keys=('z',))[0] + __call__.__doc__ = TriInterpolator.docstring__call__ + + def gradient(self, x, y): + return self._interpolate_multikeys(x, y, tri_index=None, + return_keys=('dzdx', 'dzdy')) + gradient.__doc__ = TriInterpolator.docstringgradient + """ + + Examples + -------- + An example of effective application is shown below (plot of the + direction of the vector field derivated from a known potential field): + + .. plot:: mpl_examples/pylab_examples/trigradient_demo.py + """ - Return a masked array containing linearly interpolated values at the - specified x,y points. - *x*, *y* are array_like x and y coordinates of the same shape and any - number of dimensions. + def _interpolate_single_key(self, return_key, tri_index, x, y): + tris_pts = self._tris_pts[tri_index] + alpha = self._get_alpha_vec(x, y, tris_pts) + ecc = self._eccs[tri_index] + dof = np.expand_dims(self._dof[tri_index], axis=1) + if return_key == 'z': + return self._ReferenceElement.get_function_values( + alpha, ecc, dof) + elif return_key in ['dzdx', 'dzdy']: + J = self._get_jacobian(tris_pts) + dzdx = self._ReferenceElement.get_function_derivatives( + alpha, J, ecc, dof) + if return_key == 'dzdx': + return dzdx[:, 0] + else: + return dzdx[:, 1] + else: + raise ValueError("Invalid return_key: " + return_key) - Returned masked array has the same shape as *x* and *y*; values - corresponding to (x,y) points outside of the triangulation are masked - out. + def _compute_dof(self, kind, dz=None): """ - # Check arguments. - x = np.asarray(x, dtype=np.float64) - y = np.asarray(y, dtype=np.float64) - if x.shape != y.shape: - raise ValueError("x and y must be equal-shaped arrays") + Computes and returns nodal dofs according to kind + + Parameters + ---------- + kind: {'min_E', 'geom', 'user'} + Choice of the _DOF_estimator subclass to perform the gradient + estimation. + dz: tuple of array_likes (dzdx, dzdy), optional + Used only if *kind=user ; in this case passed to the + :class:`_DOF_estimator_user`. + + Returns + ------- + dof : array_like, shape (npts,2) + Estimation of the gradient at triangulation nodes (stored as + degree of freedoms of reduced-HCT triangle elements). 
+ """ + if kind == 'user': + if dz is None: + raise ValueError("For a CubicTriInterpolator with " + "*kind*='user', a valid *dz* " + "argument is expected.") + TE = _DOF_estimator_user(self, dz=dz) + elif kind == 'geom': + TE = _DOF_estimator_geom(self) + elif kind == 'min_E': + TE = _DOF_estimator_min_E(self) + else: + raise ValueError("CubicTriInterpolator *kind* proposed: {0} ; " + "should be one of: " + "'user', 'geom', 'min_E'".format(kind)) + return TE.compute_dof_from_df() + + @staticmethod + def _get_alpha_vec(x, y, tris_pts): + """ + Fast (vectorized) function to compute barycentric coordinates alpha. + + Parameters + ---------- + x, y : array-like of dim 1 (shape (nx,)) + Coordinates of the points whose points barycentric + coordinates are requested + tris_pts : array like of dim 3 (shape: (nx,3,2)) + Coordinates of the containing triangles apexes. + + Returns + ------- + alpha : array of dim 2 (shape (nx,3)) + Barycentric coordinates of the points inside the containing + triangles. + """ + ndim = tris_pts.ndim-2 + + a = tris_pts[:, 1, :] - tris_pts[:, 0, :] + b = tris_pts[:, 2, :] - tris_pts[:, 0, :] + abT = np.concatenate([np.expand_dims(a, ndim+1), + np.expand_dims(b, ndim+1)], ndim+1) + ab = _transpose_vectorized(abT) + x = np.expand_dims(x, ndim) + y = np.expand_dims(y, ndim) + OM = np.concatenate([x, y], ndim)-tris_pts[:, 0, :] + + metric = _prod_vectorized(ab, abT) + # Here we try to deal with the colinear cases. + # metric_inv is in this case set to the Moore-Penrose pseudo-inverse + # meaning that we will still return a set of valid barycentric + # coordinates. + metric_inv = _pseudo_inv22sym_vectorized(metric) + Covar = _prod_vectorized(ab, _transpose_vectorized( + np.expand_dims(OM, ndim))) + ksi = _prod_vectorized(metric_inv, Covar) + alpha = _to_matrix_vectorized([ + [1-ksi[:, 0, 0]-ksi[:, 1, 0]], [ksi[:, 0, 0]], [ksi[:, 1, 0]]]) + return alpha + + @staticmethod + def _get_jacobian(tris_pts): + """ + Fast (vectorized) function to compute triangle jacobian matrix. + + Parameters + ---------- + tris_pts : array like of dim 3 (shape: (nx,3,2)) + Coordinates of the containing triangles apexes. + + Returns + ------- + J : array of dim 3 (shape (nx,2,2)) + Barycentric coordinates of the points inside the containing + triangles. + J[itri,:,:] is the jacobian matrix at apex 0 of the triangle + itri, so that the following (matrix) relationship holds: + [dz/dksi] = [J] x [dz/dx] + with x: global coordinates + ksi: element parametric coordinates in triangle first apex + local basis. + """ + a = np.array(tris_pts[:, 1, :]-tris_pts[:, 0, :]) + b = np.array(tris_pts[:, 2, :]-tris_pts[:, 0, :]) + J = _to_matrix_vectorized([[a[:, 0], a[:, 1]], + [b[:, 0], b[:, 1]]]) + return J + + @staticmethod + def _compute_tri_eccentricities(tris_pts): + """ + Computes triangle eccentricities + + Parameters + ---------- + tris_pts : array like of dim 3 (shape: (nx,3,2)) + Coordinates of the triangles apexes. + + Returns + ------- + ecc : array like of dim 2 (shape: (nx,3)) + The so-called eccentricity parameters [1] needed for + HCT triangular element. + """ + a = np.expand_dims(tris_pts[:, 2, :]-tris_pts[:, 1, :], axis=2) + b = np.expand_dims(tris_pts[:, 0, :]-tris_pts[:, 2, :], axis=2) + c = np.expand_dims(tris_pts[:, 1, :]-tris_pts[:, 0, :], axis=2) + # Do not use np.squeeze, this is dangerous if only one triangle + # in the triangulation... 
+ dot_a = _prod_vectorized(_transpose_vectorized(a), a)[:, 0, 0] + dot_b = _prod_vectorized(_transpose_vectorized(b), b)[:, 0, 0] + dot_c = _prod_vectorized(_transpose_vectorized(c), c)[:, 0, 0] + # Note that this line will raise a warning for dot_a, dot_b or dot_c + # zeros, but we choose not to support triangles with duplicate points. + return _to_matrix_vectorized([[(dot_c-dot_b)/dot_a], + [(dot_a-dot_c)/dot_b], + [(dot_b-dot_a)/dot_c]]) + + +# FEM element used for interpolation and for solving minimisation +# problem (Reduced HCT element) +class _ReducedHCT_Element(): + """ + Implementation of reduced HCT triangular element with explicit shape + functions. + + Computes z, dz, d2z and the element stiffness matrix for bending energy: + E(f) = integral( (d2z/dx2 + d2z/dy2)**2 dA) + + *** Reference for the shape functions: *** + [1] Basis functions for general Hsieh-Clough-Tocher _triangles, complete or + reduced. + Michel Bernadou, Kamal Hassan + International Journal for Numerical Methods in Engineering. + 17(5):784 - 789. 2.01 + + *** Element description: *** + 9 dofs: z and dz given at 3 apex + C1 (conform) + + """ + # 1) Loads matrices to generate shape functions as a function of + # triangle eccentricities - based on [1] p.11 ''' + M = np.array([ + [ 0.00, 0.00, 0.00, 4.50, 4.50, 0.00, 0.00, 0.00, 0.00, 0.00], + [-0.25, 0.00, 0.00, 0.50, 1.25, 0.00, 0.00, 0.00, 0.00, 0.00], + [-0.25, 0.00, 0.00, 1.25, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00], + [ 0.50, 1.00, 0.00, -1.50, 0.00, 3.00, 3.00, 0.00, 0.00, 3.00], + [ 0.00, 0.00, 0.00, -0.25, 0.25, 0.00, 1.00, 0.00, 0.00, 0.50], + [ 0.25, 0.00, 0.00, -0.50, -0.25, 1.00, 0.00, 0.00, 0.00, 1.00], + [ 0.50, 0.00, 1.00, 0.00, -1.50, 0.00, 0.00, 3.00, 3.00, 3.00], + [ 0.25, 0.00, 0.00, -0.25, -0.50, 0.00, 0.00, 0.00, 1.00, 1.00], + [ 0.00, 0.00, 0.00, 0.25, -0.25, 0.00, 0.00, 1.00, 0.00, 0.50]]) + M0 = np.array([ + [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], + [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], + [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], + [-1.00, 0.00, 0.00, 1.50, 1.50, 0.00, 0.00, 0.00, 0.00, -3.00], + [-0.50, 0.00, 0.00, 0.75, 0.75, 0.00, 0.00, 0.00, 0.00, -1.50], + [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], + [ 1.00, 0.00, 0.00, -1.50, -1.50, 0.00, 0.00, 0.00, 0.00, 3.00], + [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], + [ 0.50, 0.00, 0.00, -0.75, -0.75, 0.00, 0.00, 0.00, 0.00, 1.50]]) + M1 = np.array([ + [-0.50, 0.00, 0.00, 1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], + [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], + [-0.25, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], + [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], + [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], + [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], + [ 0.50, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], + [ 0.25, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], + [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]]) + M2 = np.array([ + [ 0.50, 0.00, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00], + [ 0.25, 0.00, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00], + [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], + [-0.50, 0.00, 0.00, 0.00, 1.50, 0.00, 0.00, 0.00, 0.00, 0.00], + [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], + [-0.25, 0.00, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00], + [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 
0.00], + [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], + [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]]) + + # 2) Loads matrices to rotate components of gradient & Hessian + # vectors in the reference basis of triangle first apex (a0) + rotate_dV = np.array([[ 1., 0.], [ 0., 1.], + [ 0., 1.], [-1., -1.], + [-1., -1.], [ 1., 0.]]) + + rotate_d2V = np.array([[1., 0., 0.], [0., 1., 0.], [ 0., 0., 1.], + [0., 1., 0.], [1., 1., 1.], [ 0., -2., -1.], + [1., 1., 1.], [1., 0., 0.], [-2., 0., -1.]]) + + # 3) Loads Gauss points & weights on the 3 sub-_triangles for P2 + # exact integral - points at the middle of subtriangles apex + gauss_pts = np.array([[0.5, 0.5, 0.], [0.5, 0., 0.5], [0., 0.5, 0.5], + [4./6., 1./6., 1./6.], [1./6., 4./6., 1./6.], + [1./6., 1./6., 4./6.]]) + gauss_w = np.array([1./9., 1./9., 1./9., 2./9., 2./9., 2./9.]) + + # 4) Stiffness matrix for curvature energy + E = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 2.]]) + + # 5) Loads the matrix to compute DOF_rot from tri_J at apex 0 + J0_to_J1 = np.array([[-1., 1.], [-1., 0.]]) + J0_to_J2 = np.array([[ 0., -1.], [ 1., -1.]]) + + def get_function_values(self, alpha, ecc, dofs): + """ + Parameters + ---------- + alpha : is a (N x 3 x 1) array (array of column-matrices) of + barycentric coordinates, + ecc : is a (N x 3 x 1) array (array of column-matrices) of triangle + eccentricities, + dofs : is a (N x 1 x 9) arrays (arrays of row-matrices) of computed + degrees of freedom. + + Returns + ------- + Returns the N-array of interpolated function values. + """ + subtri = np.argmin(alpha, axis=1)[:, 0] + ksi = _roll_vectorized(alpha, -subtri, axis=0) + E = _roll_vectorized(ecc, -subtri, axis=0) + x = ksi[:, 0, 0] + y = ksi[:, 1, 0] + z = ksi[:, 2, 0] + x_sq = x*x + y_sq = y*y + z_sq = z*z + V = _to_matrix_vectorized([ + [x_sq*x], [y_sq*y], [z_sq*z], [x_sq*z], [x_sq*y], [y_sq*x], + [y_sq*z], [z_sq*y], [z_sq*x], [x*y*z]]) + prod = _prod_vectorized(self.M, V) + prod += _scalar_vectorized(E[:, 0, 0], + _prod_vectorized(self.M0, V)) + prod += _scalar_vectorized(E[:, 1, 0], + _prod_vectorized(self.M1, V)) + prod += _scalar_vectorized(E[:, 2, 0], + _prod_vectorized(self.M2, V)) + s = _roll_vectorized(prod, 3*subtri, axis=0) + return _prod_vectorized(dofs, s)[:, 0, 0] + + def get_function_derivatives(self, alpha, J, ecc, dofs): + """ + Parameters + ---------- + *alpha* is a (N x 3 x 1) array (array of column-matrices of + barycentric coordinates) + *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at + triangle first apex) + *ecc* is a (N x 3 x 1) array (array of column-matrices of triangle + eccentricities) + *dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed + degrees of freedom. + + Returns + ------- + Returns the values of interpolated function derivatives [dz/dx, dz/dy] + in global coordinates at locations alpha, as a column-matrices of + shape (N x 2 x 1). 
+ """ + subtri = np.argmin(alpha, axis=1)[:, 0] + ksi = _roll_vectorized(alpha, -subtri, axis=0) + E = _roll_vectorized(ecc, -subtri, axis=0) + x = ksi[:, 0, 0] + y = ksi[:, 1, 0] + z = ksi[:, 2, 0] + x_sq = x*x + y_sq = y*y + z_sq = z*z + dV = _to_matrix_vectorized([ + [-3.*x_sq, -3.*x_sq], + [3.*y_sq, 0.], + [0., 3.*z_sq], + [-2.*x*z, -2.*x*z+x_sq], + [-2.*x*y+x_sq, -2.*x*y], + [2.*x*y-y_sq, -y_sq], + [2.*y*z, y_sq], + [z_sq, 2.*y*z], + [-z_sq, 2.*x*z-z_sq], + [x*z-y*z, x*y-y*z]]) + # Puts back dV in first apex basis + dV = _prod_vectorized(dV, _extract_submatrices( + self.rotate_dV, subtri, block_size=2, axis=0)) + + prod = _prod_vectorized(self.M, dV) + prod += _scalar_vectorized(E[:, 0, 0], + _prod_vectorized(self.M0, dV)) + prod += _scalar_vectorized(E[:, 1, 0], + _prod_vectorized(self.M1, dV)) + prod += _scalar_vectorized(E[:, 2, 0], + _prod_vectorized(self.M2, dV)) + dsdksi = _roll_vectorized(prod, 3*subtri, axis=0) + dfdksi = _prod_vectorized(dofs, dsdksi) + # In global coordinates: + # Here we try to deal with the simpliest colinear cases, returning a + # null matrix. + J_inv = _safe_inv22_vectorized(J) + dfdx = _prod_vectorized(J_inv, _transpose_vectorized(dfdksi)) + return dfdx + + def get_function_hessians(self, alpha, J, ecc, dofs): + """ + Parameters + ---------- + *alpha* is a (N x 3 x 1) array (array of column-matrices) of + barycentric coordinates + *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at + triangle first apex) + *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle + eccentricities + *dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed + degrees of freedom. + + Returns + ------- + Returns the values of interpolated function 2nd-derivatives + [d2z/dx2, d2z/dy2, d2z/dxdy] in global coordinates at locations alpha, + as a column-matrices of shape (N x 3 x 1). + """ + d2sdksi2 = self.get_d2Sidksij2(alpha, ecc) + d2fdksi2 = _prod_vectorized(dofs, d2sdksi2) + H_rot = self.get_Hrot_from_J(J) + d2fdx2 = _prod_vectorized(d2fdksi2, H_rot) + return _transpose_vectorized(d2fdx2) + + def get_d2Sidksij2(self, alpha, ecc): + """ + Parameters + ---------- + *alpha* is a (N x 3 x 1) array (array of column-matrices) of + barycentric coordinates + *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle + eccentricities + + Returns + ------- + Returns the arrays d2sdksi2 (N x 3 x 1) Hessian of shape functions + expressed in covariante coordinates in first apex basis. 
+ """ + subtri = np.argmin(alpha, axis=1)[:, 0] + ksi = _roll_vectorized(alpha, -subtri, axis=0) + E = _roll_vectorized(ecc, -subtri, axis=0) + x = ksi[:, 0, 0] + y = ksi[:, 1, 0] + z = ksi[:, 2, 0] + d2V = _to_matrix_vectorized([ + [6.*x, 6.*x, 6.*x], + [6.*y, 0., 0.], + [0., 6.*z, 0.], + [2.*z, 2.*z-4.*x, 2.*z-2.*x], + [2.*y-4.*x, 2.*y, 2.*y-2.*x], + [2.*x-4.*y, 0., -2.*y], + [2.*z, 0., 2.*y], + [0., 2.*y, 2.*z], + [0., 2.*x-4.*z, -2.*z], + [-2.*z, -2.*y, x-y-z]]) + # Puts back d2V in first apex basis + d2V = _prod_vectorized(d2V, _extract_submatrices( + self.rotate_d2V, subtri, block_size=3, axis=0)) + prod = _prod_vectorized(self.M, d2V) + prod += _scalar_vectorized(E[:, 0, 0], + _prod_vectorized(self.M0, d2V)) + prod += _scalar_vectorized(E[:, 1, 0], + _prod_vectorized(self.M1, d2V)) + prod += _scalar_vectorized(E[:, 2, 0], + _prod_vectorized(self.M2, d2V)) + d2sdksi2 = _roll_vectorized(prod, 3*subtri, axis=0) + return d2sdksi2 + + def get_bending_matrices(self, J, ecc): + """ + Parameters + ---------- + *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at + triangle first apex) + *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle + eccentricities + + Returns + ------- + Returns the element K matrices for bending energy expressed in + GLOBAL nodal coordinates. + K_ij = integral [ (d2zi/dx2 + d2zi/dy2) * (d2zj/dx2 + d2zj/dy2) dA] + tri_J is needed to rotate dofs from local basis to global basis + """ + n = np.size(ecc, 0) + + # 1) matrix to rotate dofs in global coordinates + J1 = _prod_vectorized(self.J0_to_J1, J) + J2 = _prod_vectorized(self.J0_to_J2, J) + DOF_rot = np.zeros([n, 9, 9], dtype=np.float64) + DOF_rot[:, 0, 0] = 1 + DOF_rot[:, 3, 3] = 1 + DOF_rot[:, 6, 6] = 1 + DOF_rot[:, 1:3, 1:3] = J + DOF_rot[:, 4:6, 4:6] = J1 + DOF_rot[:, 7:9, 7:9] = J2 + + # 2) matrix to rotate Hessian in global coordinates. + H_rot, area = self.get_Hrot_from_J(J, return_area=True) - # Indices of triangles containing x, y points, or -1 for no triangles. - tris = self._trifinder(x, y) + # 3) Computes stiffness matrix + # Integration according to Gauss P2 rule for each subtri. + K = np.zeros([n, 9, 9], dtype=np.float64) + weights = self.gauss_w + pts = self.gauss_pts + for igauss in range(6): + alpha = np.tile(pts[igauss, :], n).reshape(n, 3) + alpha = np.expand_dims(alpha, 3) + weight = weights[igauss] + d2Skdksi2 = self.get_d2Sidksij2(alpha, ecc) + d2Skdx2 = _prod_vectorized(d2Skdksi2, H_rot) + K += weight * _prod_vectorized(_prod_vectorized(d2Skdx2, self.E), + _transpose_vectorized(d2Skdx2)) - # Perform interpolation. - z = self._multi_interp(x, y, tris) + # 4) With nodal (not elem) dofs + K = _prod_vectorized(_prod_vectorized(_transpose_vectorized(DOF_rot), + K), DOF_rot) - # Return masked array. - return np.ma.masked_invalid(z, copy=False) + # 5) Need the area to compute total element energy + return _scalar_vectorized(area, K) - def _single_interp(self, x, y, tri): + def get_Hrot_from_J(self, J, return_area=False): """ - Return single interpolated value at specified (*x*, *y*) coordinates - within triangle with index *tri*. Returns np.nan if tri == -1. + Parameters + ---------- + *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at + triangle first apex) + + Returns + ------- + Returns H_rot used to rotate Hessian from local to global coordinates. + if *return_area* is True, returns also the triangle area (0.5*det(J)) """ - if tri == -1: - return np.nan + # Here we try to deal with the simpliest colinear cases ; a null + # energy and area is imposed. 
+ J_inv = _safe_inv22_vectorized(J) + Ji00 = J_inv[:, 0, 0] + Ji11 = J_inv[:, 1, 1] + Ji10 = J_inv[:, 1, 0] + Ji01 = J_inv[:, 0, 1] + H_rot = _to_matrix_vectorized([ + [Ji00*Ji00, Ji10*Ji10, Ji00*Ji10], + [Ji01*Ji01, Ji11*Ji11, Ji01*Ji11], + [2*Ji00*Ji01, 2*Ji11*Ji10, Ji00*Ji11+Ji10*Ji01]]) + if not return_area: + return H_rot else: - return (self._plane_coefficients[tri,0] * x + - self._plane_coefficients[tri,1] * y + - self._plane_coefficients[tri,2]) + area = 0.5 * (J[:, 0, 0]*J[:, 1, 1] - J[:, 0, 1]*J[:, 1, 0]) + return H_rot, area + + def get_Kff_and_Ff(self, J, ecc, triangles, Uc): + """ + Builds K and F for the following elliptic formulation: + minimization of curvature energy with value of function at node + imposed and derivatives 'free'. + Builds the global Kff matrix in cco format. + Builds the full Ff vec Ff = - Kfc x Uc + + Parameters + ---------- + *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at + triangle first apex) + *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle + eccentricities + *triangles* is a (N x 3) array of nodes indexes. + *Uc* is (N x 3) array of imposed displacements at nodes + + Returns + ------- + (Kff_rows, Kff_cols, Kff_vals) Kff matrix in coo format - Duplicate + (row, col) entries must be summed. + Ff: force vector - dim npts * 3 + """ + ntri = np.size(ecc, 0) + vec_range = np.arange(ntri, dtype=np.int32) + c_indices = -np.ones(ntri, dtype=np.int32) # for unused dofs, -1 + f_dof = [1, 2, 4, 5, 7, 8] + c_dof = [0, 3, 6] + + # vals, rows and cols indices in global dof numbering + f_dof_indices = _to_matrix_vectorized([[ + c_indices, triangles[:, 0]*2, triangles[:, 0]*2+1, + c_indices, triangles[:, 1]*2, triangles[:, 1]*2+1, + c_indices, triangles[:, 2]*2, triangles[:, 2]*2+1]]) + + expand_indices = np.ones([ntri, 9, 1], dtype=np.int32) + f_row_indices = _prod_vectorized(_transpose_vectorized(f_dof_indices), + _transpose_vectorized(expand_indices)) + f_col_indices = _prod_vectorized(expand_indices, f_dof_indices) + K_elem = self.get_bending_matrices(J, ecc) + + # Extracting sub-matrices + # Explanation & notations: + # * Subscript f denotes 'free' degrees of freedom (i.e. dz/dx, dz/dx) + # * Subscript c denotes 'condensated' (imposed) degrees of freedom + # (i.e. z at all nodes) + # * F = [Ff, Fc] is the force vector + # * U = [Uf, Uc] is the imposed dof vector + # [ Kff Kfc ] + # * K = [ ] is the laplacian stiffness matrix + # [ Kcf Kff ] + # * As F = K x U one gets straightforwardly: Ff = - Kfc x Uc + + # Computing Kff stiffness matrix in sparse coo format + Kff_vals = np.ravel(K_elem[np.ix_(vec_range, f_dof, f_dof)]) + Kff_rows = np.ravel(f_row_indices[np.ix_(vec_range, f_dof, f_dof)]) + Kff_cols = np.ravel(f_col_indices[np.ix_(vec_range, f_dof, f_dof)]) + + # Computing Ff force vector in sparse coo format + Kfc_elem = K_elem[np.ix_(vec_range, f_dof, c_dof)] + Uc_elem = np.expand_dims(Uc, axis=2) + Ff_elem = - _prod_vectorized(Kfc_elem, Uc_elem)[:, :, 0] + Ff_indices = f_dof_indices[np.ix_(vec_range, [0], f_dof)][:, 0, :] + + # Extracting Ff force vector in dense format + # We have to sum duplicate indices - using bincount + Ff = np.bincount(np.ravel(Ff_indices), weights=np.ravel(Ff_elem)) + return Kff_rows, Kff_cols, Kff_vals, Ff + + +# :class:_DOF_estimator, _DOF_estimator_user, _DOF_estimator_geom, +# _DOF_estimator_min_E +# Private classes used to compute the degree of freedom of each triangular +# element for the TriCubicInterpolator. 
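These private estimators are reached through the public *kind* argument of
:class:`CubicTriInterpolator` documented above. A minimal editorial sketch of
the resulting trade-off (only the API introduced by this patch is assumed;
the data is illustrative):

```python
import numpy as np
import matplotlib.tri as mtri

# Four points, two Delaunay triangles, and a smooth test field.
x = np.array([0., 1., 0., 1.])
y = np.array([0., 0., 1., 1.])
z = x*x + y*y
triang = mtri.Triangulation(x, y)

# kind='min_E' (default) runs the PCG bending-energy minimization below;
# kind='geom' uses the faster weighted-normals estimate instead.
interp_min_E = mtri.CubicTriInterpolator(triang, z)
interp_geom = mtri.CubicTriInterpolator(triang, z, kind='geom')

print(interp_min_E(0.5, 0.5))          # interpolated value (masked array)
print(interp_geom.gradient(0.5, 0.5))  # [dzdx, dzdy] estimates
```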
+class _DOF_estimator():
+    """
+    Abstract base class for classes used to estimate a function's first
+    derivatives, and deduce the dofs for a CubicTriInterpolator using a
+    reduced HCT element formulation.
+    Derived classes implement compute_dz(self, **kwargs), returning
+    np.vstack([dfx, dfy]).T where dfx, dfy are the estimates of the 2
+    gradient components.
+    """
+    def __init__(self, interpolator, **kwargs):
+        if not isinstance(interpolator, CubicTriInterpolator):
+            raise ValueError("Expected a CubicTriInterpolator object")
+        self._pts = interpolator._pts
+        self._tris_pts = interpolator._tris_pts
+        self.z = interpolator._z
+        self._triangles = interpolator._triangles
+        (self._unit_x, self._unit_y) = (interpolator._unit_x,
+                                        interpolator._unit_y)
+        self.dz = self.compute_dz(**kwargs)
+        self.compute_dof_from_df()
+
+    def compute_dz(self, **kwargs):
+        raise NotImplementedError
+
+    def compute_dof_from_df(self):
+        """
+        Computes reduced-HCT element degrees of freedom, knowing the
+        gradient.
+        """
+        J = CubicTriInterpolator._get_jacobian(self._tris_pts)
+        tri_z = self.z[self._triangles]
+        tri_dz = self.dz[self._triangles, :]
+        tri_dof = self.get_dof_vec(tri_z, tri_dz, J)
+        return tri_dof
+
+    @staticmethod
+    def get_dof_vec(tri_z, tri_dz, J):
+        """
+        Computes the dof vector of a triangle, knowing the value of f, df
+        and of the local Jacobian at each node.
+
+        *tri_z*: array of shape (3,) of f nodal values
+        *tri_dz*: array of shape (3, 2) of df/dx, df/dy nodal values
+        *J*: Jacobian matrix in local basis of apex 0
+
+        Returns dof array of shape (9,) so that for each apex iapex:
+            dof[iapex*3+0] = f(Ai)
+            dof[iapex*3+1] = df(Ai).(AiAi+)
+            dof[iapex*3+2] = df(Ai).(AiAi-)
+        """
+        npt = tri_z.shape[0]
+        dof = np.zeros([npt, 9], dtype=np.float64)
+        J1 = _prod_vectorized(_ReducedHCT_Element.J0_to_J1, J)
+        J2 = _prod_vectorized(_ReducedHCT_Element.J0_to_J2, J)
+
+        col0 = _prod_vectorized(J, np.expand_dims(tri_dz[:, 0, :], axis=3))
+        col1 = _prod_vectorized(J1, np.expand_dims(tri_dz[:, 1, :], axis=3))
+        col2 = _prod_vectorized(J2, np.expand_dims(tri_dz[:, 2, :], axis=3))
+
+        dfdksi = _to_matrix_vectorized([
+            [col0[:, 0, 0], col1[:, 0, 0], col2[:, 0, 0]],
+            [col0[:, 1, 0], col1[:, 1, 0], col2[:, 1, 0]]])
+        dof[:, 0:7:3] = tri_z
+        dof[:, 1:8:3] = dfdksi[:, 0]
+        dof[:, 2:9:3] = dfdksi[:, 1]
+        return dof
+
+
+class _DOF_estimator_user(_DOF_estimator):
+    """ dz is imposed by the user / accounts for scaling if any """
+    def compute_dz(self, dz):
+        (dzdx, dzdy) = dz
+        dzdx = dzdx * self._unit_x
+        dzdy = dzdy * self._unit_y
+        return np.vstack([dzdx, dzdy]).T
+
+
+class _DOF_estimator_geom(_DOF_estimator):
+    """ Fast 'geometric' approximation, recommended for large arrays. """
+    def compute_dz(self):
+        """
+        self.df is computed as a weighted average of the triangles sharing a
+        common node. On each triangle itri, f is first assumed linear (= ~f),
+        which allows us to compute d~f[itri].
+        The following approximation of the df nodal values is then proposed:
+            df[ipt] = SUM ( w[itri] x d~f[itri] , for itri sharing apex ipt)
+        The weighting coefficients w[itri] are proportional to the angle of
+        the triangle itri at apex ipt
+        """
+        el_geom_w = self.compute_geom_weights()
+        el_geom_grad = self.compute_geom_grads()
+
+        # Sum of weight coefficients
+        w_node_sum = np.bincount(np.ravel(self._triangles),
+                                 weights=np.ravel(el_geom_w))
+
+        # Sum of weighted df = (dfx, dfy)
+        dfx_el_w = np.empty_like(el_geom_w)
+        dfy_el_w = np.empty_like(el_geom_w)
+        for iapex in range(3):
+            dfx_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 0]
+            dfy_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 1]
+        dfx_node_sum = np.bincount(np.ravel(self._triangles),
+                                   weights=np.ravel(dfx_el_w))
+        dfy_node_sum = np.bincount(np.ravel(self._triangles),
+                                   weights=np.ravel(dfy_el_w))
+
+        # Estimation of df
+        dfx_estim = dfx_node_sum/w_node_sum
+        dfy_estim = dfy_node_sum/w_node_sum
+        return np.vstack([dfx_estim, dfy_estim]).T
+
+    def compute_geom_weights(self):
+        """
+        Builds the (nelems x 3) weight coefficients of the triangle angles,
+        renormalized so that np.sum(weights, axis=1) == np.ones(nelems)
+        """
+        weights = np.zeros([np.size(self._triangles, 0), 3])
+        tris_pts = self._tris_pts
+        for ipt in range(3):
+            p0 = tris_pts[:, (ipt) % 3, :]
+            p1 = tris_pts[:, (ipt+1) % 3, :]
+            p2 = tris_pts[:, (ipt-1) % 3, :]
+            alpha1 = np.arctan2(p1[:, 1]-p0[:, 1], p1[:, 0]-p0[:, 0])
+            alpha2 = np.arctan2(p2[:, 1]-p0[:, 1], p2[:, 0]-p0[:, 0])
+            # In the below formula we could take modulo 2. but
+            # modulo 1. is safer regarding round-off errors (flat triangles).
+            angle = np.abs(np.mod((alpha2-alpha1) / np.pi, 1.))
+            # Weight proportional to angle up to np.pi/2 ; null weight for
+            # degenerated cases 0. and np.pi (Note that `angle` is normalized
+            # by np.pi)
+            weights[:, ipt] = 0.5 - np.abs(angle-0.5)
+        return weights
+
+    def compute_geom_grads(self):
+        """
+        Compute the (global) gradient component of f assumed linear (~f).
+        returns array df of shape (nelems, 2)
+        df[ielem] . dM[ielem] = dz[ielem], i.e. df = dz x dM^-1
+        """
+        tris_pts = self._tris_pts
+        tris_f = self.z[self._triangles]
+
+        dM1 = tris_pts[:, 1, :] - tris_pts[:, 0, :]
+        dM2 = tris_pts[:, 2, :] - tris_pts[:, 0, :]
+        dM = np.dstack([dM1, dM2])
+        # Here we try to deal with the simplest colinear cases: a null
+        # gradient is assumed in this case.
+        dM_inv = _safe_inv22_vectorized(dM)
+
+        dZ1 = tris_f[:, 1] - tris_f[:, 0]
+        dZ2 = tris_f[:, 2] - tris_f[:, 0]
+        dZ = np.vstack([dZ1, dZ2]).T
+        df = np.empty_like(dZ)
+
+        # With np.einsum : could be ej,eji -> ej
+        df[:, 0] = dZ[:, 0]*dM_inv[:, 0, 0] + dZ[:, 1]*dM_inv[:, 1, 0]
+        df[:, 1] = dZ[:, 0]*dM_inv[:, 0, 1] + dZ[:, 1]*dM_inv[:, 1, 1]
+        return df
+
+
+class _DOF_estimator_min_E(_DOF_estimator_geom):
+    """
+    The 'smoothest' approximation, df is computed through a global
+    minimization of the bending energy:
+      E(f) = integral[(d2z/dx2)**2 + (d2z/dy2)**2 + 2*(d2z/dxdy)**2 dA]
+    """
+    def __init__(self, Interpolator):
+        self._eccs = Interpolator._eccs
+        _DOF_estimator_geom.__init__(self, Interpolator)
+
+    def compute_dz(self):
+        """
+        Elliptic solver for bending energy minimization.
+        Uses a dedicated 'toy' sparse Jacobi PCG solver.
+        """
+        # Initial guess for the iterative PCG solver.
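+        # (The cheap 'geom' estimate below is used as warm start x0 for
+        # _cg, which typically reduces the number of PCG iterations needed
+        # to reach the tolerance.)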
+        dz_init = _DOF_estimator_geom.compute_dz(self)
+        Uf0 = np.ravel(dz_init)
+
+        reference_element = _ReducedHCT_Element()
+        J = CubicTriInterpolator._get_jacobian(self._tris_pts)
+        eccs = self._eccs
+        triangles = self._triangles
+        Uc = self.z[self._triangles]
+
+        # Building the stiffness matrix and force vector in coo format
+        Kff_rows, Kff_cols, Kff_vals, Ff = reference_element.get_Kff_and_Ff(
+            J, eccs, triangles, Uc)
+
+        # Building the sparse matrix and solving the minimization problem
+        # We could use a scipy.sparse direct solver ; however, to avoid this
+        # external dependency, a simple PCG solver with a diagonal Jacobi
+        # preconditioner is implemented below.
+        tol = 1.e-10
+        n_dof = Ff.shape[0]
+        Kff_coo = _Sparse_Matrix_coo(Kff_vals, Kff_rows, Kff_cols,
+                                     shape=(n_dof, n_dof))
+        Kff_coo.compress_csc()
+        Uf, err = _cg(A=Kff_coo, b=Ff, x0=Uf0, tol=tol)
+        # If the PCG did not converge, we return the best guess between Uf0
+        # and Uf.
+        err0 = np.linalg.norm(Kff_coo.dot(Uf0) - Ff)
+        if err0 < err:
+            warnings.warn("In CubicTriInterpolator initialization, PCG sparse"
+                          " solver did not converge after 1000 iterations. "
+                          "`geom` approximation is used instead of `min_E`")
+            Uf = Uf0
+
+        # Building dz from Uf
+        dz = np.empty([self._pts.shape[0], 2], dtype=np.float64)
+        dz[:, 0] = Uf[::2]
+        dz[:, 1] = Uf[1::2]
+        return dz
+
+
+# The following private :class:`_Sparse_Matrix_coo` and :func:`_cg` provide
+# a PCG sparse solver for (symmetric) elliptic problems.
+class _Sparse_Matrix_coo:
+    def __init__(self, vals, rows, cols, shape):
+        """
+        Creates a sparse matrix in coo format.
+        *vals*: array of values of the non-null entries of the matrix
+        *rows*: int array of the rows of the non-null entries of the matrix
+        *cols*: int array of the cols of the non-null entries of the matrix
+        *shape*: 2-tuple (n, m) of the matrix shape
+
+        """
+        self.n, self.m = shape
+        self.vals = np.asarray(vals, dtype=np.float64)
+        self.rows = np.asarray(rows, dtype=np.int32)
+        self.cols = np.asarray(cols, dtype=np.int32)
+
+    def dot(self, V):
+        """
+        Dot product of self by a dense vector *V* ; returns a dense vector.
+        *V*: dense vector of shape (self.m,)
+        """
+        assert V.shape == (self.m,)
+        # For a more generic implementation we could use the bincount keyword
+        # argument minlength=self.m ; however:
+        # - it is new in numpy 1.6
+        # - it is unnecessary when each row has at least 1 entry in the
+        #   global matrix, which is the case here.
+        return np.bincount(self.rows, weights=self.vals*V[self.cols])
+
+    def compress_csc(self):
+        """
+        Compress rows, cols, vals / summing duplicates. Sort for csc format.
+        """
+        _, unique, indices = np.unique(
+            self.rows + self.n*self.cols,
+            return_index=True, return_inverse=True)
+        self.rows = self.rows[unique]
+        self.cols = self.cols[unique]
+        self.vals = np.bincount(indices, weights=self.vals)
+
+    def compress_csr(self):
+        """
+        Compress rows, cols, vals / summing duplicates. Sort for csr format.
+        """
+        _, unique, indices = np.unique(
+            self.m*self.rows + self.cols,
+            return_index=True, return_inverse=True)
+        self.rows = self.rows[unique]
+        self.cols = self.cols[unique]
+        self.vals = np.bincount(indices, weights=self.vals)
+
+    def to_dense(self):
+        """
+        Returns a dense matrix representing self.
+        Mainly for debugging purposes.
+ """ + ret = np.zeros([self.n, self.m], dtype=np.float64) + nvals = self.vals.size + for i in range(nvals): + ret[self.rows[i], self.cols[i]] += self.vals[i] + return ret + + def __str__(self): + return self.to_dense().__str__() + + @property + def diag(self): + """ + Returns the (dense) vector of the diagonal elements. + """ + in_diag = (self.rows == self.cols) + diag = np.zeros(min(self.n, self.n), dtype=np.float64) # default 0. + diag[self.rows[in_diag]] = self.vals[in_diag] + return diag + + +def _cg(A, b, x0=None, tol=1.e-10, maxiter=1000): + """ + Use Preconditioned Conjugate Gradient iteration to solve A x = b + A simple Jacobi (diagonal) preconditionner is used. + + Parameters + ---------- + A: _Sparse_Matrix_coo + *A* must have been compressed before by compress_csc or + compress_csr method. + + b: array + Right hand side of the linear system. + + Returns + ---------- + x: array. + The converged solution. + err: float + The absolute error np.linalg.norm(A.dot(x) - b) + + Other parameters + ---------- + x0: array. + Starting guess for the solution. + tol: float. + Tolerance to achieve. The algorithm terminates when the relative + residual is below tol. + maxiter: integer. + Maximum number of iterations. Iteration will stop + after maxiter steps even if the specified tolerance has not + been achieved. + """ + n = b.size + assert A.n == n + assert A.m == n + b_norm = np.linalg.norm(b) + + # Jacobi pre-conditioner + kvec = A.diag + # For diag elem < 1e-6 we keep 1e-6. + kvec = np.where(kvec > 1.e-6, kvec, 1.e-6) + + # Initial guess + if x0 is None: + x = np.zeros(n) + else: + x = x0 + + r = b - A.dot(x) + w = r/kvec + + p = np.zeros(n) + beta = 0.0 + rho = np.dot(r, w) + k = 0 + + # Following C. T. Kelley + while (np.sqrt(abs(rho)) > tol*b_norm) and (k < maxiter): + p = w+beta*p + z = A.dot(p) + alpha = rho/np.dot(p, z) + r = r - alpha*z + w = r/kvec + rhoold = rho + rho = np.dot(r, w) + x = x + alpha*p + beta = rho/rhoold + #err = np.linalg.norm(A.dot(x) - b) # absolute accuracy - not used + k += 1 + err = np.linalg.norm(A.dot(x) - b) + return x, err + + +# The following private functions: +# :func:`_inv22_vectorized` +# :func:`_safe_inv22_vectorized` +# :func:`_pseudo_inv22sym_vectorized` +# :func:`_prod_vectorized` +# :func:`_scalar_vectorized` +# :func:`_transpose_vectorized` +# :func:`_roll_vectorized` +# :func:`_to_matrix_vectorized` +# :func:`_extract_submatrices` +# provide fast numpy implementation of some standard operations on arrays of +# matrices - stored as (:, n_rows, n_cols)-shaped np.arrays. +def _inv22_vectorized(M): + """ + Inversion of arrays of (2,2) matrices. + """ + assert (M.ndim == 3) + assert (M.shape[-2:] == (2, 2)) + M_inv = np.empty_like(M) + delta_inv = np.reciprocal(M[:, 0, 0]*M[:, 1, 1] - M[:, 0, 1]*M[:, 1, 0]) + M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv + M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv + M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv + M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv + return M_inv + + +# Development note: Dealing with pathologic 'flat' triangles in the +# CubicTriInterpolator code and impact on (2,2)-matrix inversion functions +# :func:`_safe_inv22_vectorized` and :func:`_pseudo_inv22sym_vectorized`. 
+# +# Goals: +# 1) The CubicTriInterpolator should be able to handle flat or almost flat +# triangles without raising an error, +# 2) These degenerated triangles should have no impact on the automatic dof +# calculation (associated with null weight for the _DOF_estimator_geom and +# with null energy for the _DOF_estimator_min_E), +# 3) Linear patch test should be passed exactly on degenerated meshes, +# 4) Interpolation (with :meth:`_interpolate_single_key` or +# :meth:`_interpolate_multi_key`) shall be correctly handled even *inside* +# the pathologic triangles, to interact correctly with a TriRefiner class. +# +# Difficulties: +# Flat triangles have rank-deficient *J* (so-called jacobian matrix) and +# *metric* (the metric tensor = J x J.T). Computation of the local +# tangent plane is also problematic. +# +# Implementation: +# Most of the time, when computing the inverse of a rank-deficient matrix it +# is safe to simply return the null matrix (which is the implementation in +# :func:`_safe_inv22_vectorized`). This is because of point 2), itself +# enforced by: +# - null area hence null energy in :class:`_DOF_estimator_min_E` +# - angles close or equal to 0 or np.pi hence null weight in +# :class:`_DOF_estimator_geom`. +# Note that the function angle -> weight is continuous and maximum for an +# angle np.pi/2 (refer to :meth:`compute_geom_weights`) +# The exception is the computation of barycentric coordinates, which is done +# by inversion of the *metric* matrix. In this case, we need to compute a set +# of valid coordinates (1 among numerous possibilities), to ensure point 4). +# We benefit here from the symmetry of metric = J x J.T, which makes it easier +# to compute a pseudo-inverse in :func:`_pseudo_inv22sym_vectorized` +def _safe_inv22_vectorized(M): + """ + Inversion of arrays of (2,2) matrices, returns 0 for rank-deficient + matrices. + + *M* : array of (2,2) matrices to inverse, shape (n,2,2) + """ + assert M.ndim == 3 + assert M.shape[-2:] == (2, 2) + M_inv = np.empty_like(M) + prod1 = M[:, 0, 0]*M[:, 1, 1] + delta = prod1 - M[:, 0, 1]*M[:, 1, 0] + + # We set delta_inv to 0. in case of a rank deficient matrix ; a + # rank-deficient input matrix *M* will lead to a null matrix in output + rank2 = (np.abs(delta) > 1e-8*np.abs(prod1)) + if np.all(rank2): + # Normal 'optimized' flow. + delta_inv = 1./delta + else: + # 'Pathologic' flow. + delta_inv = np.zeros(M.shape[0]) + delta_inv[rank2] = 1./delta[rank2] + + M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv + M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv + M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv + M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv + return M_inv + + +def _pseudo_inv22sym_vectorized(M): + """ + Inversion of arrays of (2,2) SYMMETRIC matrices ; returns the + (Moore-Penrose) pseudo-inverse for rank-deficient matrices. + + In case M is of rank 1, we have M = trace(M) x P where P is the orthogonal + projection on Im(M), and we return trace(M)^-1 x P == M / trace(M)**2 + In case M is of rank 0, we return the null matrix. + + *M* : array of (2,2) matrices to inverse, shape (n,2,2) + """ + assert M.ndim == 3 + assert M.shape[-2:] == (2, 2) + M_inv = np.empty_like(M) + prod1 = M[:, 0, 0]*M[:, 1, 1] + delta = prod1 - M[:, 0, 1]*M[:, 1, 0] + rank2 = (np.abs(delta) > 1e-8*np.abs(prod1)) + + if np.all(rank2): + # Normal 'optimized' flow. + M_inv[:, 0, 0] = M[:, 1, 1] / delta + M_inv[:, 0, 1] = -M[:, 0, 1] / delta + M_inv[:, 1, 0] = -M[:, 1, 0] / delta + M_inv[:, 1, 1] = M[:, 0, 0] / delta + else: + # 'Pathologic' flow. 
+ # Here we have to deal with 2 sub-cases + # 1) First sub-case: matrices of rank 2: + delta = delta[rank2] + M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta + M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta + M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta + M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta + # 2) Second sub-case: rank-deficient matrices of rank 0 and 1: + rank01 = ~rank2 + tr = M[rank01, 0, 0] + M[rank01, 1, 1] + tr_zeros = (np.abs(tr) < 1.e-8) + sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros) + #sq_tr_inv = 1. / tr**2 + M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv + M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv + M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv + M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv + + return M_inv + + +def _prod_vectorized(M1, M2): + """ + Matrix product between arrays of matrices, or a matrix and an array of + matrices (*M1* and *M2*) + """ + sh1 = M1.shape + sh2 = M2.shape + assert len(sh1) >= 2 + assert len(sh2) >= 2 + assert sh1[-1] == sh2[-2] + + ndim1 = len(sh1) + t1_index = range(ndim1-2) + [ndim1-1, ndim1-2] + return np.sum(np.transpose(M1, t1_index)[..., np.newaxis] * + M2[..., np.newaxis, :], -3) + + +def _scalar_vectorized(scalar, M): + """ + Scalar product between scalars and matrices. + """ + return scalar[:, np.newaxis, np.newaxis]*M + + +def _transpose_vectorized(M): + """ + Transposition of an array of matrices *M*. + """ + ndim = M.ndim + assert ndim == 3 + return np.transpose(M, [0, ndim-1, ndim-2]) + + +def _roll_vectorized(M, roll_indices, axis): + """ + Rolls an array of matrices along an axis according to an array of indices + *roll_indices* + *axis* can be either 0 (rolls rows) or 1 (rolls columns). + """ + assert axis in [0, 1] + ndim = M.ndim + assert ndim == 3 + ndim_roll = roll_indices.ndim + assert ndim_roll == 1 + sh = M.shape + r, c = sh[-2:] + assert sh[0] == roll_indices.shape[0] + vec_indices = np.arange(sh[0], dtype=np.int32) + + # Builds the rolled matrix + M_roll = np.empty_like(M) + if axis == 0: + for ir in range(r): + for ic in range(c): + M_roll[:, ir, ic] = M[vec_indices, (-roll_indices+ir) % r, ic] + elif axis == 1: + for ir in range(r): + for ic in range(c): + M_roll[:, ir, ic] = M[vec_indices, ir, (-roll_indices+ic) % c] + return M_roll + + +def _to_matrix_vectorized(M): + """ + Builds an array of matrices from individuals np.arrays of identical + shapes. + *M*: ncols-list of nrows-lists of shape sh. + + Returns M_res np.array of shape (sh, nrow, ncols) so that: + M_res[...,i,j] = M[i][j] + """ + assert isinstance(M, (tuple, list)) + assert all([isinstance(item, (tuple, list)) for item in M]) + c_vec = np.asarray([len(item) for item in M]) + assert np.all(c_vec-c_vec[0] == 0) + r = len(M) + c = c_vec[0] + M00 = np.asarray(M[0][0]) + dt = M00.dtype + sh = [M00.shape[0], r, c] + M_ret = np.empty(sh, dtype=dt) + for irow in range(r): + for icol in range(c): + M_ret[:, irow, icol] = np.asarray(M[irow][icol]) + return M_ret + + +def _extract_submatrices(M, block_indices, block_size, axis): + """ + Extracts selected blocks of a matrices *M* depending on parameters + *block_indices* and *block_size*. 
+ + Returns the array of extracted matrices *Mres* so that: + M_res[...,ir,:] = M[(block_indices*block_size+ir), :] + """ + assert block_indices.ndim == 1 + assert axis in [0, 1] + + r, c = M.shape + if axis == 0: + sh = [block_indices.shape[0], block_size, c] + elif axis == 1: + sh = [block_indices.shape[0], r, block_size] + + dt = M.dtype + M_res = np.empty(sh, dtype=dt) + if axis == 0: + for ir in range(block_size): + M_res[:, ir, :] = M[(block_indices*block_size+ir), :] + elif axis == 1: + for ic in range(block_size): + M_res[:, :, ic] = M[:, (block_indices*block_size+ic)] + + return M_res diff --git a/lib/matplotlib/tri/trirefine.py b/lib/matplotlib/tri/trirefine.py new file mode 100644 index 000000000000..945409fc8255 --- /dev/null +++ b/lib/matplotlib/tri/trirefine.py @@ -0,0 +1,324 @@ +""" +Mesh refinement for triangular grids. +""" +from __future__ import print_function +import numpy as np +from matplotlib.tri.triangulation import Triangulation +import matplotlib.tri.triinterpolate + + +class TriRefiner(object): + """ + Abstract base class for classes implementing mesh refinement. + + A TriRefiner encapsulates a Triangulation object and provides tools for + mesh refinement and interpolation. + + Derived classes must implements: + + - ``refine_triangulation(return_tri_index=False, **kwargs)`` , where + the optional keyword arguments *kwargs* are defined in each + TriRefiner concrete implementation, and which returns : + + - a refined triangulation + - optionally (depending on *return_tri_index*), for each + point of the refined triangulation: the index of + the initial triangulation triangle to which it belongs. + + - ``refine_field(z, triinterpolator=None, **kwargs)`` , where: + + - *z* array of field values (to refine) defined at the base + triangulation nodes + - *triinterpolator* is a + :class:`~matplotlib.tri.TriInterpolator` (optional) + - the other optional keyword arguments *kwargs* are defined in + each TriRefiner concrete implementation + and which returns (as a tuple) a refined triangular mesh and the + interpolated values of the field at the refined triangulation nodes. + + """ + def __init__(self, triangulation): + if not isinstance(triangulation, Triangulation): + raise ValueError("Expected a Triangulation object") + self._triangulation = triangulation + + +class UniformTriRefiner(TriRefiner): + """ + Uniform mesh refinement by recursive subdivisions. + + Parameters + ---------- + triangulation : :class:`~matplotlib.tri.Triangulation` + The encapsulated triangulation (to be refined) + """ +# See Also +# -------- +# :class:`~matplotlib.tri.CubicTriInterpolator` and +# :class:`~matplotlib.tri.TriAnalyzer`. +# """ + def __init__(self, triangulation): + TriRefiner.__init__(self, triangulation) + + def refine_triangulation(self, return_tri_index=False, subdiv=3): + """ + Computes an uniformly refined triangulation *refi_triangulation* of + the encapsulated :attr:`triangulation`. + + This function refines the encapsulated triangulation by splitting each + father triangle into 4 child sub-triangles built on the edges midside + nodes, recursively (level of recursion *subdiv*). + In the end, each triangle is hence divided into ``4**subdiv`` + child triangles. + The default value for *subdiv* is 3 resulting in 64 refined + subtriangles for each triangle of the initial triangulation. + + Parameters + ---------- + return_tri_index : boolean, optional + Boolean indicating whether an index table indicating the father + triangle index of each point will be returned. 
Default value + False. + subdiv : integer, optional + Recursion level for the subdivision. Defaults value 3. + Each triangle will be divided into ``4**subdiv`` child triangles. + + Returns + ------- + refi_triangulation : :class:`~matplotlib.tri.Triangulation` + The returned refined triangulation + found_index : array-like of integers + Index of the initial triangulation containing triangle, for each + point of *refi_triangulation*. + Returned only if *return_tri_index* is set to True. + + """ + refi_triangulation = self._triangulation + ntri = refi_triangulation.triangles.shape[0] + + # Computes the triangulation ancestors numbers in the reference + # triangulation. + ancestors = np.arange(ntri, dtype=np.int32) + for _ in range(subdiv): + refi_triangulation, ancestors = self._refine_triangulation_once( + refi_triangulation, ancestors) + refi_npts = refi_triangulation.x.shape[0] + refi_triangles = refi_triangulation.triangles + + # Now we compute found_index table if needed + if return_tri_index: + # We have to initialize found_index with -1 because some nodes + # may very well belong to no triangle at all, e.g. in case of + # Delaunay Triangulation with DuplicatePointWarning. + found_index = - np.ones(refi_npts, dtype=np.int32) + tri_mask = self._triangulation.mask + if tri_mask is None: + found_index[refi_triangles] = np.repeat(ancestors, 3) + else: + # There is a subtlety here: we want to avoid whenever possible + # that refined points container is a masked triangle (which + # would result in artifacts in plots). + # So we impose the numbering from masked ancestors first, + # then overwrite it with unmasked ancestor numbers. + ancestor_mask = tri_mask[ancestors] + found_index[refi_triangles[ancestor_mask, :] + ] = np.repeat(ancestors[ancestor_mask], 3) + found_index[refi_triangles[~ancestor_mask, :] + ] = np.repeat(ancestors[~ancestor_mask], 3) + return refi_triangulation, found_index + else: + return refi_triangulation + + def refine_field(self, z, triinterpolator=None, subdiv=3): + """ + Refines a field defined on the encapsulated triangulation. + + Returns *refi_tri* (refined triangulation), *refi_z* (interpolated + values of the field at the node of the refined triangulation). + + Parameters + ---------- + z : 1d-array-like of length ``n_points`` + Values of the field to refine, defined at the nodes of the + encapsulated triangulation. (``n_points`` is the number of points + in the initial triangulation) + triinterpolator : :class:`~matplotlib.tri.TriInterpolator`, optional + Interpolator used for field interpolation. If not specified, + a :class:`~matplotlib.tri.CubicTriInterpolator` will + be used. + subdiv : integer, optional + Recursion level for the subdivision. Defaults to 3. + Each triangle will be divided into ``4**subdiv`` child triangles. + + Returns + ------- + refi_tri : :class:`~matplotlib.tri.Triangulation` object + The returned refined triangulation + refi_z : 1d array of length: *refi_tri* node count. + The returned interpolated field (at *refi_tri* nodes) + + Examples + -------- + The main application of this method is to plot high-quality + iso-contours on a coarse triangular grid (e.g. triangulation built + from relatively sparse test data): + + .. 
plot:: mpl_examples/pylab_examples/tricontour_smooth_user.py + + """ + if triinterpolator is None: + interp = matplotlib.tri.CubicTriInterpolator( + self._triangulation, z) + else: + if not isinstance(triinterpolator, + matplotlib.tri.TriInterpolator): + raise ValueError("Expected a TriInterpolator object") + interp = triinterpolator + + refi_tri, found_index = self.refine_triangulation( + subdiv=subdiv, return_tri_index=True) + refi_z = interp._interpolate_multikeys( + refi_tri.x, refi_tri.y, tri_index=found_index)[0] + return refi_tri, refi_z + + @staticmethod + def _refine_triangulation_once(triangulation, ancestors=None): + """ + This function refines a matplotlib.tri *triangulation* by splitting + each triangle into 4 child-masked_triangles built on the edges midside + nodes. + The masked triangles, if present, are also splitted but their children + returned masked. + + If *ancestors* is not provided, returns only a new triangulation: + child_triangulation. + + If the array-like key table *ancestor* is given, it shall be of shape + (ntri,) where ntri is the number of *triangulation* masked_triangles. + In this case, the function returns + (child_triangulation, child_ancestors) + child_ancestors is defined so that the 4 child masked_triangles share + the same index as their father: child_ancestors.shape = (4 * ntri,). + + """ + x = triangulation.x + y = triangulation.y + + # According to tri.triangulation doc: + # neighbors[i,j] is the triangle that is the neighbor + # to the edge from point index masked_triangles[i,j] to point + # index masked_triangles[i,(j+1)%3]. + neighbors = triangulation.neighbors + triangles = triangulation.triangles + npts = np.shape(x)[0] + ntri = np.shape(triangles)[0] + if ancestors is not None: + ancestors = np.asarray(ancestors) + if np.shape(ancestors) != (ntri,): + raise ValueError( + "Incompatible shapes provide for triangulation" + ".masked_triangles and ancestors: {0} and {1}".format( + np.shape(triangles), np.shape(ancestors))) + + # Initiating tables refi_x and refi_y of the refined triangulation + # points + # hint: each apex is shared by 2 masked_triangles except the borders. + borders = np.sum(neighbors == -1) + added_pts = (3*ntri + borders) / 2 + refi_npts = npts + added_pts + refi_x = np.zeros(refi_npts) + refi_y = np.zeros(refi_npts) + + # First part of refi_x, refi_y is just the initial points + refi_x[:npts] = x + refi_y[:npts] = y + + # Second part contains the edge midside nodes. + # Each edge belongs to 1 triangle (if border edge) or is shared by 2 + # masked_triangles (interior edge). + # We first build 2 * ntri arrays of edge starting nodes (edge_elems, + # edge_apexes) ; we then extract only the masters to avoid overlaps. 
+        # The so-called 'master' is the triangle with the larger index.
+        # The 'slave' is the triangle with the smaller index
+        # (can be -1 if border edge).
+        # For slave and master we will identify the apex pointing to the
+        # edge start.
+        edge_elems = np.ravel(np.vstack([np.arange(ntri, dtype=np.int32),
+                                         np.arange(ntri, dtype=np.int32),
+                                         np.arange(ntri, dtype=np.int32)]))
+        edge_apexes = np.ravel(np.vstack([np.zeros(ntri, dtype=np.int32),
+                                          np.ones(ntri, dtype=np.int32),
+                                          np.ones(ntri, dtype=np.int32)*2]))
+        edge_neighbors = neighbors[edge_elems, edge_apexes]
+        mask_masters = (edge_elems > edge_neighbors)
+
+        # Identifying the "masters" and adding their edge midside nodes to
+        # the refi_x, refi_y vectors.
+        masters = edge_elems[mask_masters]
+        apex_masters = edge_apexes[mask_masters]
+        x_add = (x[triangles[masters, apex_masters]] +
+                 x[triangles[masters, (apex_masters+1) % 3]]) * 0.5
+        y_add = (y[triangles[masters, apex_masters]] +
+                 y[triangles[masters, (apex_masters+1) % 3]]) * 0.5
+        refi_x[npts:] = x_add
+        refi_y[npts:] = y_add
+
+        # Building the new triangles; each old triangle hosts 4 new
+        # triangles.
+        # There are 6 points to identify per 'old' triangle: 3 new_pt_corner
+        # and 3 new_pt_midside.
+        new_pt_corner = triangles
+
+        # What is the index in refi_x, refi_y of the node at the middle of
+        # the edge starting at apex iapex of element ielem?
+        # If ielem is the apex master: a simple count, given the way refi_x
+        # was built.
+        # If ielem is the apex slave: we do not know yet; but we will soon,
+        # using the neighbors table.
+        new_pt_midside = np.empty([ntri, 3], dtype=np.int32)
+        cum_sum = npts
+        for imid in range(3):
+            mask_st_loc = (imid == apex_masters)
+            n_masters_loc = np.sum(mask_st_loc)
+            elem_masters_loc = masters[mask_st_loc]
+            new_pt_midside[:, imid][elem_masters_loc] = np.arange(
+                n_masters_loc, dtype=np.int32) + cum_sum
+            cum_sum += n_masters_loc
+
+        # Now dealing with the slave elements.
+        # For each slave element we identify the master; once
+        # slaves_masters is identified, slave_masters_apex is such that:
+        # neighbors[slaves_masters, slave_masters_apex] == slaves
+        mask_slaves = np.logical_not(mask_masters)
+        slaves = edge_elems[mask_slaves]
+        slaves_masters = edge_neighbors[mask_slaves]
+        diff_table = np.abs(neighbors[slaves_masters, :] -
+                            np.outer(slaves, np.ones(3, dtype=np.int32)))
+        slave_masters_apex = np.argmin(diff_table, axis=1)
+        slaves_apex = edge_apexes[mask_slaves]
+        new_pt_midside[slaves, slaves_apex] = new_pt_midside[
+            slaves_masters, slave_masters_apex]
+
+        # Building the 4 child triangles.
+        child_triangles = np.empty([ntri*4, 3], dtype=np.int32)
+        child_triangles[0::4, :] = np.vstack([
+            new_pt_corner[:, 0], new_pt_midside[:, 0],
+            new_pt_midside[:, 2]]).T
+        child_triangles[1::4, :] = np.vstack([
+            new_pt_corner[:, 1], new_pt_midside[:, 1],
+            new_pt_midside[:, 0]]).T
+        child_triangles[2::4, :] = np.vstack([
+            new_pt_corner[:, 2], new_pt_midside[:, 2],
+            new_pt_midside[:, 1]]).T
+        child_triangles[3::4, :] = np.vstack([
+            new_pt_midside[:, 0], new_pt_midside[:, 1],
+            new_pt_midside[:, 2]]).T
+        child_triangulation = Triangulation(refi_x, refi_y, child_triangles)
+
+        # Building the child mask.
+        if triangulation.mask is not None:
+            child_triangulation.set_mask(np.repeat(triangulation.mask, 4))
+
+        if ancestors is None:
+            return child_triangulation
+        else:
+            return child_triangulation, np.repeat(ancestors, 4)
diff --git a/lib/matplotlib/tri/tritools.py b/lib/matplotlib/tri/tritools.py
new file mode 100644
index 000000000000..f6ba3efe1a69
--- /dev/null
+++ b/lib/matplotlib/tri/tritools.py
@@ -0,0 +1,308 @@
+"""
+Tools for triangular grids.
+"""
+from __future__ import print_function
+from matplotlib.tri import Triangulation
+import numpy as np
+
+
+class TriAnalyzer(object):
+    """
+    Define basic tools for triangular mesh analysis and improvement.
+
+    A TriAnalyzer encapsulates a :class:`~matplotlib.tri.Triangulation`
+    object and provides basic tools for mesh analysis and mesh improvement.
+
+    Parameters
+    ----------
+    triangulation : :class:`~matplotlib.tri.Triangulation` object
+        The encapsulated triangulation to analyze.
+
+    Attributes
+    ----------
+    scale_factors
+
+    """
+    def __init__(self, triangulation):
+        if not isinstance(triangulation, Triangulation):
+            raise ValueError("Expected a Triangulation object")
+        self._triangulation = triangulation
+
+    @property
+    def scale_factors(self):
+        """
+        Factors to rescale the triangulation into a unit square.
+
+        Returns *k*, a tuple of 2 scale factors.
+
+        Returns
+        -------
+        k : tuple of 2 floats (kx, ky)
+            Scale factors such that the rescaled triangulation
+            ``[triangulation.x * kx, triangulation.y * ky]``
+            fits exactly inside a unit square.
+
+        """
+        compressed_triangles = self._triangulation.get_masked_triangles()
+        # minlength ensures node_used covers every node, even trailing
+        # nodes referenced by no triangle.
+        node_used = (np.bincount(
+            np.ravel(compressed_triangles),
+            minlength=np.size(self._triangulation.x)) != 0)
+        x = self._triangulation.x[node_used]
+        y = self._triangulation.y[node_used]
+        ux = np.max(x) - np.min(x)
+        uy = np.max(y) - np.min(y)
+        return (1./float(ux), 1./float(uy))
+
+    def circle_ratios(self, rescale=True):
+        """
+        Returns a measure of the flatness of the triangulation's triangles.
+
+        The ratio of the incircle radius over the circumcircle radius is a
+        widely used indicator of a triangle's flatness.
+        It is always ``<= 0.5`` and ``== 0.5`` only for equilateral
+        triangles. Circle ratios below 0.01 denote very flat triangles.
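+
+        For a triangle with side lengths *a*, *b*, *c*, semiperimeter
+        ``s = (a + b + c) / 2`` and area *A* given by Heron's formula
+        (``A = sqrt(s*(s-a)*(s-b)*(s-c))``), the implementation uses the
+        classical identities ``r = A / s`` for the incircle radius and
+        ``R = a*b*c / (4*A)`` for the circumcircle radius.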
+
+        To avoid unduly low values due to a difference of scale between the
+        two axes, the triangular mesh can first be rescaled to fit inside a
+        unit square with :attr:`scale_factors` (only if *rescale* is True,
+        which is its default value).
+
+        Parameters
+        ----------
+        rescale : boolean, optional
+            If True, a rescaling will be internally performed (based on
+            :attr:`scale_factors`), so that the (unmasked) triangles fit
+            exactly inside a unit square mesh. Default is True.
+
+        Returns
+        -------
+        circle_ratios : masked array
+            Ratio of the incircle radius over the circumcircle radius, for
+            each 'rescaled' triangle of the encapsulated triangulation.
+            Values corresponding to masked triangles are masked out.
+
+        """
+        # Coords rescaling
+        if rescale:
+            (kx, ky) = self.scale_factors
+        else:
+            (kx, ky) = (1.0, 1.0)
+        pts = np.vstack([self._triangulation.x*kx,
+                         self._triangulation.y*ky]).T
+        tri_pts = pts[self._triangulation.triangles]
+        # Computes the 3 side lengths.
+        a = tri_pts[:, 1, :] - tri_pts[:, 0, :]
+        b = tri_pts[:, 2, :] - tri_pts[:, 1, :]
+        c = tri_pts[:, 0, :] - tri_pts[:, 2, :]
+        a = np.sqrt(a[:, 0]**2 + a[:, 1]**2)
+        b = np.sqrt(b[:, 0]**2 + b[:, 1]**2)
+        c = np.sqrt(c[:, 0]**2 + c[:, 1]**2)
+        # Circumcircle and incircle radii.
+        s = (a+b+c)*0.5
+        prod = s*(a+b-s)*(a+c-s)*(b+c-s)  # prod is the squared area (Heron)
+        # We have to deal with flat triangles with infinite circum_radius.
+        bool_flat = (prod == 0.)
+        if np.any(bool_flat):
+            # Pathologic flow: some triangles have zero area.
+            ntri = tri_pts.shape[0]
+            circum_radius = np.empty(ntri, dtype=np.float64)
+            circum_radius[bool_flat] = np.inf
+            abc = a*b*c
+            circum_radius[~bool_flat] = abc[~bool_flat] / (
+                4.0*np.sqrt(prod[~bool_flat]))
+        else:
+            # Normal optimized flow.
+            circum_radius = (a*b*c) / (4.0*np.sqrt(prod))
+        in_radius = (a*b*c) / (4.0*circum_radius*s)
+        circle_ratio = in_radius / circum_radius
+        mask = self._triangulation.mask
+        if mask is None:
+            return circle_ratio
+        else:
+            return np.ma.array(circle_ratio, mask=mask)
+
+    def get_flat_tri_mask(self, min_circle_ratio=0.01, rescale=True):
+        """
+        Eliminates excessively flat border triangles from the triangulation.
+
+        Returns a mask *new_mask* which allows one to clean the encapsulated
+        triangulation of its border-located flat triangles
+        (according to their :meth:`circle_ratios`).
+        This mask is meant to be subsequently applied to the triangulation
+        using :func:`matplotlib.tri.Triangulation.set_mask`.
+        *new_mask* is an extension of the initial triangulation mask
+        in the sense that an initially masked triangle will remain masked.
+
+        The *new_mask* array is computed iteratively; at each step flat
+        triangles are removed only if they share a side with the current
+        mesh border. Thus no new holes in the triangulated domain will be
+        created.
+
+        Parameters
+        ----------
+        min_circle_ratio : float, optional
+            Border triangles with incircle/circumcircle radii ratio r/R will
+            be removed if r/R < *min_circle_ratio*. Default value: 0.01.
+        rescale : boolean, optional
+            If True, a rescaling will first be internally performed (based
+            on :attr:`scale_factors`), so that the (unmasked) triangles fit
+            exactly inside a unit square mesh. This rescaling accounts for
+            the difference of scale which might exist between the two axes.
+            Default (and recommended) value is True.
+
+        Returns
+        -------
+        new_mask : array-like of booleans
+            Mask to apply to the encapsulated triangulation.
+            All the initially masked triangles remain masked in the
+            *new_mask*.
+
+        Notes
+        -----
+        The rationale behind this function is that a Delaunay
+        triangulation - of an unstructured set of points - sometimes
+        contains almost flat triangles at its border, leading to artifacts
+        in plots (especially for high-resolution contouring).
+        Once masked with the computed *new_mask*, the encapsulated
+        triangulation contains no unmasked border triangles with a circle
+        ratio below *min_circle_ratio*, thus improving the mesh quality for
+        subsequent plots or interpolation.
+
+        Examples
+        --------
+        Please refer to the following illustrating example:
+
+        .. plot:: mpl_examples/pylab_examples/tricontour_smooth_delaunay.py
+
+        """
+        # Iteratively builds the new mask: a triangle is masked out if it
+        # has a bad circle ratio and lies at the border of the mesh, or
+        # touches the border through a chain of already-masked flat
+        # triangles.
+        ntri = self._triangulation.triangles.shape[0]
+        mask_bad_ratio = self.circle_ratios(rescale) < min_circle_ratio
+
+        current_mask = self._triangulation.mask
+        if current_mask is None:
+            current_mask = np.zeros(ntri, dtype=bool)
+        valid_neighbors = np.copy(self._triangulation.neighbors)
+        renum_neighbors = np.arange(ntri, dtype=np.int32)
+        nadd = -1
+        while nadd != 0:
+            # The active wavefront is the triangles from the border
+            # (unmasked but with at least 1 neighbor equal to -1).
+            wavefront = ((np.min(valid_neighbors, axis=1) == -1)
+                         & ~current_mask)
+            # The elements from the active wavefront will be masked if
+            # their circle ratio is bad.
+            added_mask = np.logical_and(wavefront, mask_bad_ratio)
+            current_mask = (added_mask | current_mask)
+            nadd = np.sum(added_mask)
+
+            # Now we have to update the valid_neighbors and renum_neighbors
+            # tables.
+            valid_neighbors[added_mask, :] = -1
+            renum_neighbors[added_mask] = -1
+            valid_neighbors = np.where(valid_neighbors == -1, -1,
+                                       renum_neighbors[valid_neighbors])
+
+        return np.ma.filled(current_mask, True)
+
+    def _get_compressed_triangulation(self, return_tri_renum=False,
+                                      return_node_renum=False):
+        """
+        Compress (if masked) the encapsulated triangulation.
+
+        Returns minimal-length triangles array (*compressed_triangles*) and
+        coordinates arrays (*compressed_x*, *compressed_y*) that can still
+        describe the unmasked triangles of the encapsulated triangulation.
+
+        Parameters
+        ----------
+        return_tri_renum : boolean, optional
+            Indicates whether a renumbering table to translate the triangle
+            numbers from the encapsulated triangulation numbering into the
+            new (compressed) renumbering will be returned.
+        return_node_renum : boolean, optional
+            Indicates whether a renumbering table to translate the node
+            numbers from the encapsulated triangulation numbering into the
+            new (compressed) renumbering will be returned.
+
+        Returns
+        -------
+        compressed_triangles : array-like
+            the returned compressed triangulation triangles
+        compressed_x : array-like
+            the returned compressed triangulation 1st coordinate
+        compressed_y : array-like
+            the returned compressed triangulation 2nd coordinate
+        tri_renum : array-like of integers
+            renumbering table to translate the triangle numbers from the
+            encapsulated triangulation into the new (compressed)
+            renumbering. -1 for masked triangles (deleted from
+            *compressed_triangles*).
+            Returned only if *return_tri_renum* is True.
+        node_renum : array-like of integers
+            renumbering table to translate the point numbers from the
+            encapsulated triangulation into the new (compressed)
+            renumbering. -1 for unused points (i.e. those deleted from
+            *compressed_x* and *compressed_y*).
+            Returned only if *return_node_renum* is True.
+
+        """
+        # Valid triangles and renumbering
+        tri_mask = self._triangulation.mask
+        compressed_triangles = self._triangulation.get_masked_triangles()
+        ntri = self._triangulation.triangles.shape[0]
+        tri_renum = self._total_to_compress_renum(tri_mask, ntri)
+
+        # Valid nodes and renumbering.
+        # minlength ensures the bincount covers every node, including
+        # trailing nodes referenced by no triangle at all.
+        node_mask = (np.bincount(
+            np.ravel(compressed_triangles),
+            minlength=np.size(self._triangulation.x)) == 0)
+        compressed_x = self._triangulation.x[~node_mask]
+        compressed_y = self._triangulation.y[~node_mask]
+        node_renum = self._total_to_compress_renum(node_mask)
+
+        # Now renumbering the valid triangles nodes.
+        compressed_triangles = node_renum[compressed_triangles]
+
+        # 4 cases possible for return
+        if not return_tri_renum:
+            if not return_node_renum:
+                return compressed_triangles, compressed_x, compressed_y
+            else:
+                return (compressed_triangles, compressed_x, compressed_y,
+                        node_renum)
+        else:
+            if not return_node_renum:
+                return (compressed_triangles, compressed_x, compressed_y,
+                        tri_renum)
+            else:
+                return (compressed_triangles, compressed_x, compressed_y,
+                        tri_renum, node_renum)
+
+    @staticmethod
+    def _total_to_compress_renum(mask, n=None):
+        """
+        Returns the renumbering table from the full numbering to the
+        compressed (unmasked-only) numbering.
+
+        Parameters
+        ----------
+        mask : 1d boolean array or None
+            The mask.
+        n : integer, optional
+            Length of the mask. Useful only if *mask* can be None.
+
+        Returns
+        -------
+        renum : integer array
+            Array such that (with `valid_array` being a compressed array
+            based on a `masked_array` with mask *mask*):
+
+            - For all i such that mask[i] = False:
+              valid_array[renum[i]] = masked_array[i]
+            - For all i such that mask[i] = True:
+              renum[i] = -1 (invalid value)
+
+        """
+        if n is None:
+            n = np.size(mask)
+        if mask is not None:
+            renum = -np.ones(n, dtype=np.int32)  # Default num is -1
+            valid = np.arange(n, dtype=np.int32).compress(~mask, axis=0)
+            renum[valid] = np.arange(np.size(valid, 0), dtype=np.int32)
+            return renum
+        else:
+            return np.arange(n, dtype=np.int32)
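+
+
+# A minimal end-to-end sketch combining TriAnalyzer with UniformTriRefiner
+# (illustrative only; the random data and variable names below are not
+# part of the API):
+#
+#     import matplotlib.pyplot as plt
+#     import matplotlib.tri as mtri
+#     import numpy as np
+#
+#     x, y = np.random.rand(2, 100)
+#     z = np.exp(-((x - 0.5)**2 + (y - 0.5)**2))
+#     tri = mtri.Triangulation(x, y)   # Delaunay triangulation
+#
+#     # Mask badly shaped border triangles, then refine for smooth contours
+#     tri.set_mask(mtri.TriAnalyzer(tri).get_flat_tri_mask(
+#         min_circle_ratio=0.01))
+#     refiner = mtri.UniformTriRefiner(tri)
+#     tri_refi, z_refi = refiner.refine_field(z, subdiv=3)
+#     plt.tricontourf(tri_refi, z_refi)
+#     plt.show()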