refactor(analysis): change the symbol for norm to align with the unicode spec (#17575)

The characters in question are:

* U+2016 DOUBLE VERTICAL LINE `‖`. The Unicode Character Database says "used in pairs to indicate norm of a matrix", for instance [here](https://www.fileformat.info/info/unicode/char/2016/index.htm) and [here](https://unicode-explorer.com/c/2016)
* U+2225 PARALLEL TO `∥`. On some platforms this renders with a forward slant!

Previously `norm` was the latter and `parallel` was the former. This change swaps them around:

* `∥x∥`, `S ‖ T`: before
* `‖x‖`, `S ∥ T`: after

Code using U+2016 `‖` for fintype cardinality has been left unchanged.
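For illustration, here is a minimal Lean 3 sketch of what the swap means at the notation level. The `my_norm` / `my_parallel` constants and the notation commands are hypothetical stand-ins, not the actual mathlib declarations; the point is only which character ends up bound to which meaning after this commit.

```lean
import data.real.basic

-- Hypothetical stand-ins for `norm` and `parallel`; sketch only.
constant E : Type
constant my_norm : E → ℝ
constant my_parallel : E → E → Prop

-- After this commit, U+2016 DOUBLE VERTICAL LINE delimits norms …
local notation `‖` e `‖` := my_norm e
-- … and U+2225 PARALLEL TO is the infix "parallel" symbol.
local infix ` ∥ `:50 := my_parallel

example (x y : E) (h : ‖x‖ = 0) (hxy : x ∥ y) : ‖x‖ = 0 ∧ x ∥ y := ⟨h, hxy⟩
```
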
eric-wieser committed Nov 17, 2022
1 parent 720aee9 commit 17ef379
Showing 211 changed files with 4,978 additions and 4,978 deletions.
60 changes: 30 additions & 30 deletions archive/imo/imo1972_q5.lean
@@ -16,49 +16,49 @@ Prove that `|g(x)| ≤ 1` for all `x`.
-/

/--
This proof begins by introducing the supremum of `f`, `k ≤ 1` as well as `k' = k / ∥g y∥`. We then
This proof begins by introducing the supremum of `f`, `k ≤ 1` as well as `k' = k / ‖g y‖`. We then
suppose that the conclusion does not hold (`hneg`) and show that `k ≤ k'` (by
`2 * (∥f x∥ * ∥g y∥) ≤ 2 * k` obtained from the main hypothesis `hf1`) and that `k' < k` (obtained
`2 * (‖f x‖ * ‖g y‖) ≤ 2 * k` obtained from the main hypothesis `hf1`) and that `k' < k` (obtained
from `hneg` directly), finally raising a contradiction with `k' < k'`.
(Authored by Stanislas Polu inspired by Ruben Van de Velde).
-/
example (f g : ℝ → ℝ)
(hf1 : ∀ x, ∀ y, (f(x+y) + f(x-y)) = 2 * f(x) * g(y))
(hf2 : ∀ y, ∥f(y)∥ ≤ 1)
(hf2 : ∀ y, ‖f(y)‖ ≤ 1)
(hf3 : ∃ x, f(x) ≠ 0)
(y : ℝ) :
∥g(y)∥ ≤ 1 :=
‖g(y)‖ ≤ 1 :=
begin
set S := set.range (λ x, ∥f x∥),
set S := set.range (λ x, ‖f x‖),
-- Introduce `k`, the supremum of `f`.
let k : ℝ := Sup (S),

-- Show that `∥f x∥ ≤ k`.
have hk₁ : ∀ x, ∥f x∥ ≤ k,
-- Show that `‖f x‖ ≤ k`.
have hk₁ : ∀ x, ‖f x‖ ≤ k,
{ have h : bdd_above S, from ⟨1, set.forall_range_iff.mpr hf2⟩,
intro x,
exact le_cSup h (set.mem_range_self x), },
-- Show that `2 * (∥f x∥ * ∥g y∥) ≤ 2 * k`.
have hk₂ : ∀ x, 2 * (∥f x∥ * ∥g y∥) ≤ 2 * k,
-- Show that `2 * (‖f x‖ * ‖g y‖) ≤ 2 * k`.
have hk₂ : ∀ x, 2 * (‖f x‖ * ‖g y‖) ≤ 2 * k,
{ intro x,
calc 2 * (∥f x∥ * ∥g y∥)
= ∥2 * f x * g y∥ : by simp [abs_mul, mul_assoc]
... = ∥f (x + y) + f (x - y)∥ : by rw hf1
... ≤ ∥f (x + y)∥ + ∥f (x - y)∥ : norm_add_le _ _
calc 2 * (‖f x‖ * ‖g y‖)
= ‖2 * f x * g y‖ : by simp [abs_mul, mul_assoc]
... = ‖f (x + y) + f (x - y)‖ : by rw hf1
... ≤ ‖f (x + y)‖ + ‖f (x - y)‖ : norm_add_le _ _
... ≤ k + k : add_le_add (hk₁ _) (hk₁ _)
... = 2 * k : (two_mul _).symm, },

-- Suppose the conclusion does not hold.
by_contra' hneg,
set k' := k / ∥g y∥,
set k' := k / ‖g y‖,

-- Demonstrate that `k' < k` using `hneg`.
have H₁ : k' < k,
{ have h₁ : 0 < k,
{ obtain ⟨x, hx⟩ := hf3,
calc 0
< ∥f x∥ : norm_pos_iff.mpr hx
< ‖f x‖ : norm_pos_iff.mpr hx
... ≤ k : hk₁ x },
rw div_lt_iff,
apply lt_mul_of_one_lt_right h₁ hneg,
@@ -67,8 +67,8 @@ begin
-- Demonstrate that `k ≤ k'` using `hk₂`.
have H₂ : k ≤ k',
{ have h₁ : ∃ x : ℝ, x ∈ S,
{ use ∥f 0∥, exact set.mem_range_self 0, },
have h₂ : ∀ x, ∥f x∥ ≤ k',
{ use ‖f 0‖, exact set.mem_range_self 0, },
have h₂ : ∀ x, ‖f x‖ ≤ k',
{ intros x,
rw le_div_iff,
{ apply (mul_le_mul_left zero_lt_two).mp (hk₂ x) },
@@ -95,28 +95,28 @@ This is a more concise version of the proof proposed by Ruben Van de Velde.
-/
example (f g : ℝ → ℝ)
(hf1 : ∀ x, ∀ y, (f (x+y) + f(x-y)) = 2 * f(x) * g(y))
(hf2 : bdd_above (set.range (λ x, ∥f x∥)))
(hf2 : bdd_above (set.range (λ x, ‖f x‖)))
(hf3 : ∃ x, f(x) ≠ 0)
(y : ℝ) :
∥g(y)∥ ≤ 1 :=
‖g(y)‖ ≤ 1 :=
begin
obtain ⟨x, hx⟩ := hf3,
set k := ⨆ x, ∥f x∥,
have h : ∀ x, ∥f x∥ ≤ k := le_csupr hf2,
set k := ⨆ x, ‖f x‖,
have h : ∀ x, ‖f x‖ ≤ k := le_csupr hf2,
by_contra' H,
have hgy : 0 < ∥g y∥,
have hgy : 0 < ‖g y‖,
by linarith,
have k_pos : 0 < k := lt_of_lt_of_le (norm_pos_iff.mpr hx) (h x),
have : k / ∥g y∥ < k := (div_lt_iff hgy).mpr (lt_mul_of_one_lt_right k_pos H),
have : k ≤ k / ∥g y∥,
{ suffices : ∀ x, ∥f x∥ ≤ k / ∥g y∥, from csupr_le this,
have : k / ‖g y‖ < k := (div_lt_iff hgy).mpr (lt_mul_of_one_lt_right k_pos H),
have : k ≤ k / ‖g y‖,
{ suffices : ∀ x, ‖f x‖ ≤ k / ‖g y‖, from csupr_le this,
intro x,
suffices : 2 * (∥f x∥ * ∥g y∥) ≤ 2 * k,
suffices : 2 * (‖f x‖ * ‖g y‖) ≤ 2 * k,
by { rwa [le_div_iff hgy, ←mul_le_mul_left (zero_lt_two : (0 : ℝ) < 2)] },
calc 2 * (∥f x∥ * ∥g y∥)
= ∥2 * f x * g y∥ : by simp [abs_mul, mul_assoc]
... = ∥f (x + y) + f (x - y)∥ : by rw hf1
... ≤ ∥f (x + y)∥ + ∥f (x - y)∥ : abs_add _ _
calc 2 * (‖f x‖ * ‖g y‖)
= ‖2 * f x * g y‖ : by simp [abs_mul, mul_assoc]
... = ‖f (x + y) + f (x - y)‖ : by rw hf1
... ≤ ‖f (x + y)‖ + ‖f (x - y)‖ : abs_add _ _
... ≤ 2 * k : by linarith [h (x+y), h (x -y)] },
linarith,
end
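A standalone sanity check of the renamed delimiters (not part of this diff; it assumes a post-commit mathlib tree in which `analysis.normed.group.basic` provides `normed_add_comm_group` and `norm_add_le`, the triangle-inequality lemma used in the first `calc` above):

```lean
import analysis.normed.group.basic

-- After the swap, `‖·‖` still elaborates to `norm`, so `norm_add_le`
-- states the triangle inequality with the new delimiters.
example {E : Type*} [normed_add_comm_group E] (a b : E) :
  ‖a + b‖ ≤ ‖a‖ + ‖b‖ :=
norm_add_le a b
```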
10 changes: 5 additions & 5 deletions counterexamples/phillips.lean
@@ -375,7 +375,7 @@ section
-/

lemma norm_indicator_le_one (s : set α) (x : α) :
∥(indicator s (1 : α → ℝ)) x∥ ≤ 1 :=
‖(indicator s (1 : α → ℝ)) x‖ ≤ 1 :=
by { simp only [indicator, pi.one_apply], split_ifs; norm_num }

/-- A functional in the dual space of bounded functions gives rise to a bounded additive measure,
@@ -392,8 +392,8 @@ def _root_.continuous_linear_map.to_bounded_additive_measure
by { ext x, simp [indicator_union_of_disjoint hst], },
rw [this, f.map_add],
end,
exists_bound := ⟨∥f∥, λ s, begin
have I : ∥of_normed_add_comm_group_discrete (indicator s 1) 1 (norm_indicator_le_one s)∥ ≤ 1,
exists_bound := ⟨‖f‖, λ s, begin
have I : ‖of_normed_add_comm_group_discrete (indicator s 1) 1 (norm_indicator_le_one s)‖ ≤ 1,
by apply norm_of_normed_add_comm_group_le _ zero_le_one,
apply le_trans (f.le_op_norm _),
simpa using mul_le_mul_of_nonneg_left I (norm_nonneg f),
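For orientation, the bound established in the `exists_bound` field above is the usual operator-norm estimate, restated informally (the indicator takes only the values `0` and `1`, which is exactly what `norm_indicator_le_one` supplies):

```latex
% |f 1_s| ≤ ‖f‖ * ‖1_s‖ ≤ ‖f‖, since ‖1_s‖ ≤ 1 in the sup norm.
\[
  \lvert f(\mathbf{1}_s) \rvert
    \;\le\; \lVert f \rVert \cdot \lVert \mathbf{1}_s \rVert_\infty
    \;\le\; \lVert f \rVert .
\]
```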
@@ -492,7 +492,7 @@ lemma countable_spf_mem (Hcont : #ℝ = aleph 1) (y : ℝ) : {x | y ∈ spf Hcon
We construct a function `f` from `[0,1]` to a complete Banach space `B`, which is weakly measurable
(i.e., for any continuous linear form `φ` on `B` the function `φ ∘ f` is measurable), bounded in
norm (i.e., for all `x`, one has `∥f x∥ ≤ 1`), and still `f` has no Pettis integral.
norm (i.e., for all `x`, one has `‖f x‖ ≤ 1`), and still `f` has no Pettis integral.
This construction, due to Phillips, requires the continuum hypothesis. We will take for `B` the
space of all bounded functions on `ℝ`, with the supremum norm (no measure here, we are really
@@ -582,7 +582,7 @@ begin
end

/-- The function `f Hcont : ℝ → (discrete_copy ℝ →ᵇ ℝ)` is uniformly bounded by `1` in norm. -/
lemma norm_bound (Hcont : #ℝ = aleph 1) (x : ℝ) : ∥f Hcont x∥ ≤ 1 :=
lemma norm_bound (Hcont : #ℝ = aleph 1) (x : ℝ) : ‖f Hcont x‖ ≤ 1 :=
norm_of_normed_add_comm_group_le _ zero_le_one _

/-- The function `f Hcont : ℝ → (discrete_copy ℝ →ᵇ ℝ)` has no Pettis integral. -/
16 changes: 8 additions & 8 deletions src/analysis/ODE/gronwall.lean
@@ -9,8 +9,8 @@ import analysis.special_functions.exp_deriv
# Grönwall's inequality
The main technical result of this file is the Grönwall-like inequality
`norm_le_gronwall_bound_of_norm_deriv_right_le`. It states that if `f : ℝ → E` satisfies `∥f a∥ ≤ δ`
and `∀ x ∈ [a, b), ∥f' x∥ ≤ K * ∥f x∥ + ε`, then for all `x ∈ [a, b]` we have `∥f x∥ ≤ δ * exp (K *
`norm_le_gronwall_bound_of_norm_deriv_right_le`. It states that if `f : ℝ → E` satisfies `‖f a‖ ≤ δ`
and `∀ x ∈ [a, b), ‖f' x‖ ≤ K * ‖f x‖ + ε`, then for all `x ∈ [a, b]` we have `‖f x‖ ≤ δ * exp (K *
x) + (ε / K) * (exp (K * x) - 1)`.
Then we use this inequality to prove some estimates on the possible rate of growth of the distance
@@ -22,7 +22,7 @@ Sec. 4.5][HubbardWest-ode], where `norm_le_gronwall_bound_of_norm_deriv_right_le
## TODO
- Once we have FTC, prove an inequality for a function satisfying `∥f' x∥ ≤ K x * ∥f x∥ + ε`,
- Once we have FTC, prove an inequality for a function satisfying `‖f' x‖ ≤ K x * ‖f x‖ + ε`,
or more generally `liminf_{y→x+0} (f y - f x)/(y - x) ≤ K x * f x + ε` with any sign
of `K x` and `f x`.
-/
@@ -101,7 +101,7 @@ the inequalities `f a ≤ δ` and
`∀ x ∈ [a, b), liminf_{z→x+0} (f z - f x)/(z - x) ≤ K * (f x) + ε`, then `f x`
is bounded by `gronwall_bound δ K ε (x - a)` on `[a, b]`.
See also `norm_le_gronwall_bound_of_norm_deriv_right_le` for a version bounding `∥f x∥`,
See also `norm_le_gronwall_bound_of_norm_deriv_right_le` for a version bounding `‖f x‖`,
`f : ℝ → E`. -/
theorem le_gronwall_bound_of_liminf_deriv_right_le {f f' : ℝ → ℝ} {δ K ε : ℝ} {a b : ℝ}
(hf : continuous_on f (Icc a b))
@@ -128,13 +128,13 @@ begin
end

/-- A Grönwall-like inequality: if `f : ℝ → E` is continuous on `[a, b]`, has right derivative
`f' x` at every point `x ∈ [a, b)`, and satisfies the inequalities `∥f a∥ ≤ δ`,
`∀ x ∈ [a, b), ∥f' x∥ ≤ K * ∥f x∥ + ε`, then `∥f x∥` is bounded by `gronwall_bound δ K ε (x - a)`
`f' x` at every point `x ∈ [a, b)`, and satisfies the inequalities `‖f a‖ ≤ δ`,
`∀ x ∈ [a, b), ‖f' x‖ ≤ K * ‖f x‖ + ε`, then `‖f x‖` is bounded by `gronwall_bound δ K ε (x - a)`
on `[a, b]`. -/
theorem norm_le_gronwall_bound_of_norm_deriv_right_le {f f' : ℝ → E} {δ K ε : ℝ} {a b : ℝ}
(hf : continuous_on f (Icc a b)) (hf' : ∀ x ∈ Ico a b, has_deriv_within_at f (f' x) (Ici x) x)
(ha : ∥f a∥ ≤ δ) (bound : ∀ x ∈ Ico a b, ∥f' x∥ ≤ K * ∥f x∥ + ε) :
∀ x ∈ Icc a b, ∥f x∥ ≤ gronwall_bound δ K ε (x - a) :=
(ha : ‖f a‖ ≤ δ) (bound : ∀ x ∈ Ico a b, ‖f' x‖ ≤ K * ‖f x‖ + ε) :
∀ x ∈ Icc a b, ‖f x‖ ≤ gronwall_bound δ K ε (x - a) :=
le_gronwall_bound_of_liminf_deriv_right_le (continuous_norm.comp_continuous_on hf)
(λ x hx r hr, (hf' x hx).liminf_right_slope_norm_le hr) ha bound

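Written out informally, the estimate stated by `norm_le_gronwall_bound_of_norm_deriv_right_le` above is the following (for `K ≠ 0`; `gronwall_bound δ K ε (x - a)` in the theorem statement abbreviates the right-hand side):

```latex
% Assuming ‖f a‖ ≤ δ and ‖f' x‖ ≤ K * ‖f x‖ + ε on [a, b), for all x ∈ [a, b]:
\[
  \lVert f(x) \rVert
    \;\le\; \delta\, e^{K (x - a)}
    \;+\; \frac{\varepsilon}{K} \left( e^{K (x - a)} - 1 \right).
\]
```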
10 changes: 5 additions & 5 deletions src/analysis/ODE/picard_lindelof.lean
@@ -51,7 +51,7 @@ structure is_picard_lindelof
(hR : 0 ≤ R)
(lipschitz : ∀ t ∈ Icc t_min t_max, lipschitz_on_with L (v t) (closed_ball x₀ R))
(cont : ∀ x ∈ closed_ball x₀ R, continuous_on (λ (t : ℝ), v t x) (Icc t_min t_max))
(norm_le : ∀ (t ∈ Icc t_min t_max) (x ∈ closed_ball x₀ R), ∥v t x∥ ≤ C)
(norm_le : ∀ (t ∈ Icc t_min t_max) (x ∈ closed_ball x₀ R), ‖v t x‖ ≤ C)
(C_mul_le_R : (C : ℝ) * linear_order.max (t_max - t₀) (t₀ - t_min) ≤ R)
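For context on the `C_mul_le_R` field above (a reading of the hypotheses, not text from the file): a solution curve `γ` issued from `x₀` whose speed is bounded by `C` (the `norm_le` field) stays inside `closed_ball x₀ R`, where the Lipschitz and continuity assumptions apply, because

```latex
% Mean value inequality combined with C_mul_le_R:
\[
  \lVert \gamma(t) - x_0 \rVert
    \;\le\; C \, \lvert t - t_0 \rvert
    \;\le\; C \cdot \max(t_{\max} - t_0,\ t_0 - t_{\min})
    \;\le\; R .
\]
```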

/-- This structure holds arguments of the Picard-Lipschitz (Cauchy-Lipschitz) theorem. It is part of
@@ -100,7 +100,7 @@ have continuous_on (uncurry (flip v)) (closed_ball v.x₀ v.R ×ˢ Icc v.t_min v
this.comp continuous_swap.continuous_on (preimage_swap_prod _ _).symm.subset

lemma norm_le {t : ℝ} (ht : t ∈ Icc v.t_min v.t_max) {x : E} (hx : x ∈ closed_ball v.x₀ v.R) :
∥v t x∥ ≤ v.C :=
‖v t x‖ ≤ v.C :=
v.is_pl.norm_le _ ht _ hx

/-- The maximum of distances from `t₀` to the endpoints of `[t_min, t_max]`. -/
@@ -192,7 +192,7 @@ begin
exact ⟨(v.proj x).2, f.mem_closed_ball _⟩
end

lemma norm_v_comp_le (t : ℝ) : ∥f.v_comp t∥ ≤ v.C :=
lemma norm_v_comp_le (t : ℝ) : ‖f.v_comp t‖ ≤ v.C :=
v.norm_le (v.proj t).2 $ f.mem_closed_ball _

lemma dist_apply_le_dist (f₁ f₂ : fun_space v) (t : Icc v.t_min v.t_max) :
@@ -262,7 +262,7 @@ begin
simp only [dist_eq_norm, next_apply, add_sub_add_left_eq_sub,
← interval_integral.integral_sub (interval_integrable_v_comp _ _ _)
(interval_integrable_v_comp _ _ _), norm_integral_eq_norm_integral_Ioc] at *,
calc ∥∫ τ in Ι (v.t₀ : ℝ) t, f₁.v_comp τ - f₂.v_comp τ∥
calc ‖∫ τ in Ι (v.t₀ : ℝ) t, f₁.v_comp τ - f₂.v_comp τ‖
≤ ∫ τ in Ι (v.t₀ : ℝ) t, v.L * ((v.L * |τ - v.t₀|) ^ n / n! * d) :
begin
refine norm_integral_le_of_norm_le (continuous.integrable_on_interval_oc _) _,
@@ -338,7 +338,7 @@ end picard_lindelof

lemma is_picard_lindelof.norm_le₀ {E : Type*} [normed_add_comm_group E]
{v : ℝ → E → E} {t_min t₀ t_max : ℝ} {x₀ : E} {C R : ℝ} {L : ℝ≥0}
(hpl : is_picard_lindelof v t_min t₀ t_max x₀ L R C) : ∥v t₀ x₀∥ ≤ C :=
(hpl : is_picard_lindelof v t_min t₀ t_max x₀ L R C) : ‖v t₀ x₀‖ ≤ C :=
hpl.norm_le t₀ hpl.ht₀ x₀ $ mem_closed_ball_self hpl.hR

/-- Picard-Lindelöf (Cauchy-Lipschitz) theorem. -/
