Shitty offset curves for GPU stroke expansion
Implemented a stroking scheme in the flatten stage in which every
flattened line segment gets offset along the curve normal by half the
line width. The offsetting is applied during the subdivision into quads.

This also includes some hacks to slightly improve rendering around cusps
by increasing the number of line segments. Rigorous handling of cusps
and inflection points isn't supported.
armansito committed Nov 1, 2023
1 parent c8dcbb5 commit c96ccb7
Showing 4 changed files with 155 additions and 35 deletions.
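In outline: after flattening, each segment endpoint is displaced along the local curve normal, and two offset segments (one per side, the back side reversed) are emitted in place of the single centerline segment. A minimal CPU-side Rust sketch of that emission step, with hypothetical names (`Line`, `offset_segments`) that are not part of this change:

struct Line {
    p0: [f32; 2],
    p1: [f32; 2],
}

// Offsets each segment of a flattened polyline `pts` by `half_width` along
// normals derived from per-point tangents. Mirrors the LineSoup emission in
// flatten.wgsl below: `lp0 + n0 -> lp1 + n1` and `lp1 - n1 -> lp0 - n0`.
fn offset_segments(pts: &[[f32; 2]], tangents: &[[f32; 2]], half_width: f32) -> Vec<Line> {
    let normal = |t: [f32; 2]| {
        let len = (t[0] * t[0] + t[1] * t[1]).sqrt();
        // Rotate the tangent by 90 degrees, normalize, scale to the offset distance.
        [-t[1] / len * half_width, t[0] / len * half_width]
    };
    let mut out = Vec::new();
    for i in 0..pts.len().saturating_sub(1) {
        let (n0, n1) = (normal(tangents[i]), normal(tangents[i + 1]));
        let (p0, p1) = (pts[i], pts[i + 1]);
        // Forward side of the stroke boundary.
        out.push(Line {
            p0: [p0[0] + n0[0], p0[1] + n0[1]],
            p1: [p1[0] + n1[0], p1[1] + n1[1]],
        });
        // Back side, reversed to keep a consistent winding for the fill stage.
        out.push(Line {
            p0: [p1[0] - n1[0], p1[1] - n1[1]],
            p1: [p0[0] - n0[0], p0[1] - n0[1]],
        });
    }
    out
}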
14 changes: 13 additions & 1 deletion crates/encoding/src/encoding.rs
@@ -3,7 +3,10 @@

use super::{DrawColor, DrawTag, PathEncoder, PathTag, Style, Transform};

use peniko::{kurbo::Shape, BlendMode, BrushRef, Color, Fill};
use peniko::{
kurbo::{Shape, Stroke},
BlendMode, BrushRef, Color, Fill,
};

#[cfg(feature = "full")]
use {
@@ -172,6 +175,15 @@ impl Encoding {
}
}

/// Encodes a stroke style.
pub fn encode_stroke_style(&mut self, stroke: &Stroke) {
let style = Style::from_stroke(stroke);
if self.styles.last() != Some(&style) {
self.path_tags.push(PathTag::STYLE);
self.styles.push(style);
}
}

/// Encodes a transform.
///
/// If the given transform is different from the current one, encodes it and
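A hypothetical usage sketch of the new `encode_stroke_style` method (assuming kurbo's `Stroke` builder API; `encoding` is a mutable `Encoding`). Note the comparison against `styles.last()`: re-encoding an identical style pushes no second STYLE tag.

use peniko::kurbo::{Cap, Join, Stroke};

// Hypothetical setup; `encoding` is an `Encoding` as in the diff above.
let stroke = Stroke::new(2.0)
    .with_caps(Cap::Round)
    .with_join(Join::Miter)
    .with_miter_limit(4.0);
encoding.encode_stroke_style(&stroke);
// Identical to the last encoded style, so this second call is a no-op.
encoding.encode_stroke_style(&stroke);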
6 changes: 4 additions & 2 deletions examples/scenes/src/test_scenes.rs
@@ -105,12 +105,14 @@ fn stroke_styles(sb: &mut SceneBuilder, params: &mut SceneParams) {
Color::rgb8(201, 147, 206),
Color::rgb8(150, 195, 160),
];
let simple_stroke = [LineTo((100., 0.).into())];
// TODO: need to correctly handle missing move-tos
let simple_stroke = [MoveTo((0., 0.).into()), LineTo((100., 0.).into())];
let join_stroke = [
MoveTo((0., 0.).into()),
CurveTo((20., 0.).into(), (42.5, 5.).into(), (50., 25.).into()),
CurveTo((57.5, 5.).into(), (80., 0.).into(), (100., 0.).into()),
];
let miter_stroke = [LineTo((90., 21.).into()), LineTo((0., 42.).into())];
let miter_stroke = [MoveTo((0., 0.).into()), LineTo((90., 21.).into()), LineTo((0., 42.).into())];
let cap_styles = [Cap::Butt, Cap::Square, Cap::Round];
let join_styles = [Join::Bevel, Join::Miter, Join::Round];
let miter_limits = [4., 5., 0.1, 10.];
111 changes: 99 additions & 12 deletions shader/flatten.wgsl
@@ -89,6 +89,55 @@ fn eval_cubic(p0: vec2<f32>, p1: vec2<f32>, p2: vec2<f32>, p3: vec2<f32>, t: f32
return p0 * (mt * mt * mt) + (p1 * (mt * mt * 3.0) + (p2 * (mt * 3.0) + p3 * t) * t) * t;
}

fn eval_cubic_tangent(p0: vec2<f32>, p1: vec2<f32>, p2: vec2<f32>, p3: vec2<f32>, t: f32) -> vec2<f32> {
let dp0 = 3. * (p1 - p0);
let dp1 = 3. * (p2 - p1);
let dp2 = 3. * (p3 - p2);
return eval_quad(dp0, dp1, dp2, t);
}

fn eval_cubic_normal(p0: vec2<f32>, p1: vec2<f32>, p2: vec2<f32>, p3: vec2<f32>, t: f32) -> vec2<f32> {
let tangent = eval_cubic_tangent(p0, p1, p2, p3, t);
return vec2(-tangent.y, tangent.x);
}

fn eval_quad_tangent(p0: vec2<f32>, p1: vec2<f32>, p2: vec2<f32>, t: f32) -> vec2<f32> {
let dp0 = 2. * (p1 - p0);
let dp1 = 2. * (p2 - p1);
return mix(dp0, dp1, t);
}

fn eval_quad_normal(p0: vec2<f32>, p1: vec2<f32>, p2: vec2<f32>, t: f32) -> vec2<f32> {
let tangent = eval_quad_tangent(p0, p1, p2, t);
return vec2(-tangent.y, tangent.x);
}

fn cubic_start_tangent(p0: vec2<f32>, p1: vec2<f32>, p2: vec2<f32>, p3: vec2<f32>) -> vec2<f32> {
let EPS = 1e-12;
let d01 = p1 - p0;
let d02 = p2 - p0;
let d03 = p3 - p0;
return select(select(d03, d02, dot(d02, d02) > EPS), d01, dot(d01, d01) > EPS);
}

fn cubic_end_tangent(p0: vec2<f32>, p1: vec2<f32>, p2: vec2<f32>, p3: vec2<f32>) -> vec2<f32> {
let EPS = 1e-12;
let d23 = p3 - p2;
let d13 = p3 - p1;
let d03 = p3 - p0;
return select(select(d03, d13, dot(d13, d13) > EPS), d23, dot(d23, d23) > EPS);
}

fn cubic_start_normal(p0: vec2<f32>, p1: vec2<f32>, p2: vec2<f32>, p3: vec2<f32>) -> vec2<f32> {
let tangent = cubic_start_tangent(p0, p1, p2, p3);
return vec2(-tangent.y, tangent.x);
}

fn cubic_end_normal(p0: vec2<f32>, p1: vec2<f32>, p2: vec2<f32>, p3: vec2<f32>) -> vec2<f32> {
let tangent = cubic_end_tangent(p0, p1, p2, p3);
return vec2(-tangent.y, tangent.x);
}

let MAX_QUADS = 16u;

fn flatten_cubic(cubic: Cubic) {
@@ -97,7 +146,7 @@ fn flatten_cubic(cubic: Cubic) {
let p2 = cubic.p2;
let p3 = cubic.p3;
let err_v = 3.0 * (p2 - p1) + p0 - p3;
let err = dot(err_v, err_v);
var err = dot(err_v, err_v);
let ACCURACY = 0.25;
let Q_ACCURACY = ACCURACY * 0.1;
let REM_ACCURACY = ACCURACY - Q_ACCURACY;
@@ -113,11 +162,22 @@
let qp2 = eval_cubic(p0, p1, p2, p3, t);
var qp1 = eval_cubic(p0, p1, p2, p3, t - 0.5 * step);
qp1 = 2.0 * qp1 - 0.5 * (qp0 + qp2);
let params = estimate_subdiv(qp0, qp1, qp2, sqrt(REM_ACCURACY));

// HACK: this increases the subdivision count as a function of the stroke width for shitty strokes.

dragostis commented on Nov 6, 2023:

Hi Arman! Not sure what the end-goal is here, but my solution for quadratic offset curves could be a good stop-gap solution until you have a better implementation for this?

armansito (author, collaborator) replied on Nov 7, 2023:

Hi Dragoș! That's interesting and your method can definitely be a good solution to estimate the subdivision count for the offset curves. AIUI it's estimating the parabola integral and its inverse using B-spline interpolation. Do you have the spline parameters documented somewhere?

That said, the long term goal is to fit the offset curves using an Euler Spiral, so the subdivision logic here will change quite a bit.

dragostis replied on Nov 8, 2023:

Unfortunately, I didn't get to open source the code before returning my work laptop at Google, so I don't have the source anymore.

Using SciPy to get the values should be easy enough. I used this method to compute the spline. For selecting the points, I took advantage of the symmetry of the functions, selected a rectangle large enough, then repeatedly estimated error maxima in this rectangle and added them to the grid. I think I had fewer than 20 points on each axis and was using a 2nd-degree B-spline for this.

The evaluation for the B-spline lives here.

var tol = sqrt(REM_ACCURACY);
if cubic.flags == 1u {
tol *= min(1000., dot(cubic.stroke, cubic.stroke));
}
let params = estimate_subdiv(qp0, qp1, qp2, tol);
keep_params[i] = params;
val += params.val;
qp0 = qp2;
}

// HACK: normal vector used to offset line segments for shitty stroke handling.
var n0 = cubic_start_normal(p0, p1, p2, p3);
n0 = normalize(n0) * cubic.stroke;

let n = max(u32(ceil(val * (0.5 / sqrt(REM_ACCURACY)))), 1u);
var lp0 = p0;
qp0 = p0;
@@ -129,27 +189,45 @@
let qp2 = eval_cubic(p0, p1, p2, p3, t);
var qp1 = eval_cubic(p0, p1, p2, p3, t - 0.5 * step);
qp1 = 2.0 * qp1 - 0.5 * (qp0 + qp2);
let qp0_normal = eval_quad_normal(qp0, qp1, qp2, 0.);
let params = keep_params[i];
let u0 = approx_parabola_inv_integral(params.a0);
let u2 = approx_parabola_inv_integral(params.a2);
let uscale = 1.0 / (u2 - u0);
var val_target = f32(n_out) * v_step;
while n_out == n || val_target < val_sum + params.val {
var lp1: vec2<f32>;
var t1: f32;
if n_out == n {
lp1 = p3;
t1 = 1.;
} else {
let u = (val_target - val_sum) / params.val;
let a = mix(params.a0, params.a2, u);
let au = approx_parabola_inv_integral(a);
let t = (au - u0) * uscale;
t1 = t;
lp1 = eval_quad(qp0, qp1, qp2, t);
}

// Output line segment lp0..lp1
let line_ix = atomicAdd(&bump.lines, 1u);
// TODO: check failure
lines[line_ix] = LineSoup(cubic.path_ix, lp0, lp1);
if cubic.flags == 1u {
var n1: vec2f;
if all(lp1 == p3) {
n1 = cubic_end_normal(p0, p1, p2, p3);
} else {
n1 = eval_quad_normal(qp0, qp1, qp2, t1);
}
n1 = normalize(n1) * cubic.stroke;
let line_ix = atomicAdd(&bump.lines, 2u);
lines[line_ix] = LineSoup(cubic.path_ix, lp0 + n0, lp1 + n1);
lines[line_ix + 1u] = LineSoup(cubic.path_ix, lp1 - n1, lp0 - n0);
n0 = n1;
} else {
// Output line segment lp0..lp1
let line_ix = atomicAdd(&bump.lines, 1u);
// TODO: check failure
lines[line_ix] = LineSoup(cubic.path_ix, lp0, lp1);
}
n_out += 1u;
val_target += v_step;
lp0 = lp1;
@@ -220,8 +298,7 @@ fn main(

let out = &path_bboxes[tm.path_ix];
let style_flags = scene[config.style_base + tm.style_ix];
// TODO: We assume all paths are fills at the moment. This is where we will extract the stroke
// vs fill state using STYLE_FLAGS_STYLE_BIT.
// The fill bit is always set to 0 for strokes, which represents a non-zero fill.
let draw_flags = select(DRAW_INFO_FLAGS_FILL_RULE_BIT, 0u, (style_flags & STYLE_FLAGS_FILL_BIT) == 0u);
if (tag_byte & PATH_TAG_PATH) != 0u {
(*out).draw_flags = draw_flags;
@@ -276,17 +353,27 @@ fn main(
}
var stroke = vec2(0.0, 0.0);
let is_stroke = (style_flags & STYLE_FLAGS_STYLE_BIT) != 0u;
/*
// TODO: the stroke handling here is dead code for now
if is_stroke {
// TODO: WIP
let linewidth = bitcast<f32>(scene[config.style_base + tm.style_ix + 1u]);
// See https://www.iquilezles.org/www/articles/ellipses/ellipses.htm
// This is the correct bounding box, but we're not handling rendering
// in the isotropic case, so it may mismatch.
stroke = 0.5 * linewidth * vec2(length(transform.mat.xz), length(transform.mat.yw));
bbox += vec4(-stroke, stroke);

flatten_cubic(Cubic(p0, p1, p2, p3, stroke, tm.path_ix, u32(is_stroke)));

// TODO: proper caps
let n0 = normalize(cubic_start_normal(p0, p1, p2, p3)) * stroke;
let n1 = normalize(cubic_end_normal(p0, p1, p2, p3)) * stroke;

let line_ix = atomicAdd(&bump.lines, 2u);
lines[line_ix] = LineSoup(tm.path_ix, p0 - n0, p0 + n0);
lines[line_ix + 1u] = LineSoup(tm.path_ix, p3 + n1, p3 - n1);
} else {
flatten_cubic(Cubic(p0, p1, p2, p3, stroke, tm.path_ix, u32(is_stroke)));
}
*/
flatten_cubic(Cubic(p0, p1, p2, p3, stroke, tm.path_ix, u32(is_stroke)));
// Update bounding box using atomics only. Computing a monoid is a
// potential future optimization.
if bbox.z > bbox.x || bbox.w > bbox.y {
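For reference, `estimate_subdiv` and `approx_parabola_inv_integral` above implement the parabola-integral flattening method used elsewhere in Vello: subdivision points are spaced uniformly in the integrated parameter (`val_target`), then mapped back through the approximate inverse (the `u0`/`u2`/`uscale` arithmetic in the loop). A Rust transcription of the two approximations, using the constants from the published derivation (a sketch for illustration, not part of this diff):

// Approximate integral that converts the quadratic's parameter into a measure
// proportional to flattening cost; D = 0.67 gives a cheap closed form.
fn approx_parabola_integral(x: f32) -> f32 {
    const D: f32 = 0.67;
    x / (1.0 - D + (D * D * D * D + 0.25 * x * x).sqrt().sqrt())
}

// Approximate inverse of the above; B = 0.39 keeps the round-trip error small
// over the range that flattening actually uses.
fn approx_parabola_inv_integral(x: f32) -> f32 {
    const B: f32 = 0.39;
    x * (1.0 - B + (B * B + 0.25 * x * x).sqrt())
}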
59 changes: 39 additions & 20 deletions src/scene.rs
@@ -149,26 +149,45 @@ impl<'a> SceneBuilder<'a> {
brush_transform: Option<Affine>,
shape: &impl Shape,
) {
// The settings for tolerance are a compromise. For most applications,
// shape tolerance doesn't matter, as the input is likely Bézier paths,
// which is exact. Note that shape tolerance is hard-coded as 0.1 in
// the encoding crate.
//
// Stroke tolerance is a different matter. Generally, the cost scales
// with inverse O(n^6), so there is moderate rendering cost to setting
// too fine a value. On the other hand, error scales with the transform
// applied post-stroking, so may exceed visible threshold. When we do
// GPU-side stroking, the transform will be known. In the meantime,
// this is a compromise.
const SHAPE_TOLERANCE: f64 = 0.01;
const STROKE_TOLERANCE: f64 = SHAPE_TOLERANCE;
let stroked = peniko::kurbo::stroke(
shape.path_elements(SHAPE_TOLERANCE),
style,
&Default::default(),
STROKE_TOLERANCE,
);
self.fill(Fill::NonZero, transform, brush, brush_transform, &stroked);
const GPU_STROKES: bool = true;
if GPU_STROKES {
// TODO: handle dashing by using a DashIterator
self.scene
.encode_transform(Transform::from_kurbo(&transform));
self.scene.encode_stroke_style(style);
if self.scene.encode_shape(shape, false) {
if let Some(brush_transform) = brush_transform {
if self
.scene
.encode_transform(Transform::from_kurbo(&(transform * brush_transform)))
{
self.scene.swap_last_path_tags();
}
}
self.scene.encode_brush(brush, 1.0);
}
} else {
// The settings for tolerance are a compromise. For most applications,
// shape tolerance doesn't matter, as the input is likely Bézier paths,
// which is exact. Note that shape tolerance is hard-coded as 0.1 in
// the encoding crate.
//
// Stroke tolerance is a different matter. Generally, the cost scales
// with inverse O(n^6), so there is moderate rendering cost to setting
// too fine a value. On the other hand, error scales with the transform
// applied post-stroking, so may exceed visible threshold. When we do
// GPU-side stroking, the transform will be known. In the meantime,
// this is a compromise.
const SHAPE_TOLERANCE: f64 = 0.01;
const STROKE_TOLERANCE: f64 = SHAPE_TOLERANCE;
let stroked = peniko::kurbo::stroke(
shape.path_elements(SHAPE_TOLERANCE),
style,
&Default::default(),
STROKE_TOLERANCE,
);
self.fill(Fill::NonZero, transform, brush, brush_transform, &stroked);
}
}

/// Draws an image at its natural size with the given transform.
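End to end, with GPU_STROKES enabled a stroke call now encodes the transform, style, path, and brush directly rather than expanding the stroke on the CPU. A hypothetical caller-side sketch (assuming the `SceneBuilder::for_scene` constructor and the `stroke` signature as of this commit):

use peniko::{
    kurbo::{Affine, Line, Stroke},
    Color,
};

// Hypothetical: `scene` is a `vello::Scene`.
let mut sb = SceneBuilder::for_scene(&mut scene);
sb.stroke(
    &Stroke::new(6.0),
    Affine::IDENTITY,
    Color::rgb8(201, 147, 206),
    None, // no brush transform
    &Line::new((10.0, 10.0), (200.0, 60.0)),
);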
