
(*:maxLineLen=78:*)

theory Synopsis
  imports Main Base
begin

chapter ‹Synopsis›

section ‹Notepad›

text ‹
  An Isar proof body serves as mathematical notepad to compose logical
  content, consisting of types, terms, facts.
›


subsection ‹Types and terms›

notepad
begin
  txt ‹Locally fixed entities:›
  fix x   ― ‹local constant, without any type information yet›
  fix x :: 'a  ― ‹variant with explicit type-constraint for subsequent use›

  fix a b
  assume "a = b"  ― ‹type assignment at first occurrence in concrete term

  txt ‹Definitions (non-polymorphic):›
  define x :: 'a where "x = t"

  txt ‹Abbreviations (polymorphic):›
  let ?f = "λx. x"
  term "?f ?f"

  txt ‹Notation:›
  write x  ("***")
end


subsection ‹Facts›

text ‹
  A fact is a simultaneous list of theorems.
›

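text ‹
  A minimal sketch (with hypothetical free propositions ‹A› and ‹B›): a
  single fact name may refer to several theorems at once.
›

notepad
begin
  assume ab: A B  ― ‹one fact consisting of two theorems›
  note ab
end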

subsubsection ‹Producing facts›

notepad
begin

  txt ‹Via assumption (``lambda''):›
  assume a: A

  txt ‹Via proof (``let''):›
  have b: B \<proof>

  txt ‹Via abbreviation (``let''):›
  note c = a b

end


subsubsection ‹Referencing facts›

notepad
begin
  txt ‹Via explicit name:›
  assume a: A
  note a

  txt ‹Via implicit name:›
  assume A
  note this

  txt ‹Via literal proposition (unification with results from the proof text):›
  assume A
  note ‹A›

  assume "⋀x. B x"
  note ‹B a›
  note ‹B b›
end


subsubsection ‹Manipulating facts›

notepad
begin
  txt ‹Instantiation:›
  assume a: "⋀x. B x"
  note a
  note a [of b]
  note a [where x = b]

  txt ‹Backchaining:›
  assume 1: A
  assume 2: "A ⟹ C"
  note 2 [OF 1]
  note 1 [THEN 2]

  txt ‹Symmetric results:›
  assume "x = y"
  note this [symmetric]

  assume "x  y"
  note this [symmetric]

  txt ‹Adhoc-simplification (take care!):›
  assume "P ([] @ xs)"
  note this [simplified]
end


subsubsection ‹Projections›

text ‹
  Isar facts consist of multiple theorems.  There is notation to project
  interval ranges.
›

notepad
begin
  assume stuff: A B C D
  note stuff(1)
  note stuff(2-3)
  note stuff(2-)
end


subsubsection ‹Naming conventions›

text ‹
  ▪ Lower-case identifiers are usually preferred.

  ▪ Facts can be named after the main term within the proposition.

  ▪ Facts should not be named after the command that
  introduced them (@{command "assume"}, @{command "have"}).  This is
  misleading and hard to maintain.

  ▪ Natural numbers can be used as ``meaningless'' names (more
  appropriate than ‹a1›, ‹a2› etc.)

  ▪ Symbolic identifiers are supported (e.g. ‹*›, ‹**›, ‹***›).
›

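text ‹
  A minimal sketch of these conventions (the propositions and fact names are
  ad-hoc examples):
›

notepad
begin
  assume eq: "x = y"  ― ‹named after the main term›
  assume 1: A and 2: B  ― ‹``meaningless'' numeric names›
  assume *: C  ― ‹symbolic identifier›
  note eq and 1 2 and *
end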

subsection ‹Block structure›

text ‹
  The formal notepad is block structured.  The fact produced by the last
  entry of a block is exported into the outer context.
›

notepad
begin
  {
    have a: A \<proof>
    have b: B \<proof>
    note a b
  }
  note this
  note ‹A›
  note ‹B›
end

text ‹Explicit blocks as well as implicit blocks of nested goal
  statements (e.g.\ @{command have}) automatically introduce one extra
  pair of parentheses in reserve.  The @{command next} command allows
  one to ``jump'' between these sub-blocks.›

notepad
begin

  {
    have a: A \<proof>
  next
    have b: B
    proof -
      show B \<proof>
    next
      have c: C \<proof>
    next
      have d: D \<proof>
    qed
  }

  txt ‹Alternative version with explicit parentheses everywhere:›

  {
    {
      have a: A \<proof>
    }
    {
      have b: B
      proof -
        {
          show B \<proof>
        }
        {
          have c: C \<proof>
        }
        {
          have d: D \<proof>
        }
      qed
    }
  }

end


section ‹Calculational reasoning \label{sec:calculations-synopsis}›

text ‹
  For example, see 🗏‹~~/src/HOL/Isar_Examples/Group.thy›.
›


subsection ‹Special names in Isar proofs›

text ‹
  ▪ term ‹?thesis› --- the main conclusion of the
  innermost pending claim

  ▪ term ‹…› --- the argument of the last explicitly
  stated result (for infix application this is the right-hand side)

  ▪ fact ‹this› --- the last result produced in the text
›

notepad
begin
  have "x = y"
  proof -
    term ?thesis
    show ?thesis \<proof>
    term ?thesis  ― ‹static!›
  qed
  term "…"
  thm this
end

text ‹Calculational reasoning maintains the special fact called
  ``‹calculation›'' in the background.  Certain language
  elements combine primary ‹this› with secondary ‹calculation›.›


subsection ‹Transitive chains›

text ‹The idea is to combine ‹this› and ‹calculation›
  via typical ‹trans› rules (see also @{command
  print_trans_rules}):›

thm trans
thm less_trans
thm less_le_trans

notepad
begin
  txt ‹Plain bottom-up calculation:›
  have "a = b" \<proof>
  also
  have "b = c" \<proof>
  also
  have "c = d" \<proof>
  finally
  have "a = d" .

  txt ‹Variant using the ‹…› abbreviation:›
  have "a = b" \<proof>
  also
  have " = c" \<proof>
  also
  have " = d" \<proof>
  finally
  have "a = d" .

  txt ‹Top-down version with explicit claim at the head:›
  have "a = d"
  proof -
    have "a = b" \<proof>
    also
    have " = c" \<proof>
    also
    have " = d" \<proof>
    finally
    show ?thesis .
  qed
next
  txt ‹Mixed inequalities (require suitable base type):›
  fix a b c d :: nat

  have "a < b" \<proof>
  also
  have "b  c" \<proof>
  also
  have "c = d" \<proof>
  finally
  have "a < d" .
end


subsubsection ‹Notes›

text ‹
  ▪ The notion of ‹trans› rule is very general due to the
  flexibility of Isabelle/Pure rule composition.

  ▪ User applications may declare their own rules, with some care
  about the operational details of higher-order unification (a minimal
  declaration sketch follows below).
›

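text ‹
  A minimal sketch of a user-defined rule declaration (the name ‹my_trans› is
  hypothetical): the ‹trans› attribute makes the rule available to the
  calculational commands ‹also›/‹finally›.
›

lemma my_trans [trans]:
  fixes a b c :: nat
  assumes "a < b" and "b ≤ c"
  shows "a < c"
  using assms by (rule less_le_trans)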

subsection ‹Degenerate calculations›

text ‹The idea is to append ‹this› to ‹calculation›, without rule composition.
  This is occasionally useful to avoid naming intermediate facts.›

notepad
begin
  txt ‹A vacuous proof:›
  have A \<proof>
  moreover
  have B \<proof>
  moreover
  have C \<proof>
  ultimately
  have A and B and C .
next
  txt ‹Slightly more content (trivial bigstep reasoning):›
  have A \<proof>
  moreover
  have B \<proof>
  moreover
  have C \<proof>
  ultimately
  have "A  B  C" by blast
end

text ‹Note that for multi-branch case splitting, it is better to use @{command
  consider}.›


section ‹Induction›

subsection ‹Induction as Natural Deduction›

text ‹In principle, induction is just a special case of Natural
  Deduction (see also \secref{sec:natural-deduction-synopsis}).  For
  example:›

thm nat.induct
print_statement nat.induct

notepad
begin
  fix n :: nat
  have "P n"
  proof (rule nat.induct)  ― ‹fragile rule application!›
    show "P 0" \<proof>
  next
    fix n :: nat
    assume "P n"
    show "P (Suc n)" \<proof>
  qed
end

text ‹
  In practice, much more proof infrastructure is required.

  The proof method @{method induct} provides:

  ▪ implicit rule selection and robust instantiation

  ▪ context elements via symbolic case names

  ▪ support for rule-structured induction statements, with local
  parameters, premises, etc.
›

notepad
begin
  fix n :: nat
  have "P n"
  proof (induct n)
    case 0
    show ?case \<proof>
  next
    case (Suc n)
    from Suc.hyps show ?case \<proof>
  qed
end


subsubsection ‹Example›

text ‹
  The subsequent example combines the following proof patterns:

  ▪ outermost induction (over the datatype structure of natural
  numbers), to decompose the proof problem in top-down manner

  ▪ calculational reasoning (\secref{sec:calculations-synopsis})
  to compose the result in each case

  ▪ solving local claims within the calculation by simplification
›

lemma
  fixes n :: nat
  shows "(i=0..n. i) = n * (n + 1) div 2"
proof (induct n)
  case 0
  have "(i=0..0. i) = (0::nat)" by simp
  also have " = 0 * (0 + 1) div 2" by simp
  finally show ?case .
next
  case (Suc n)
  have "(i=0..Suc n. i) = (i=0..n. i) + (n + 1)" by simp
  also have " = n * (n + 1) div 2 + (n + 1)" by (simp add: Suc.hyps)
  also have " = (n * (n + 1) + 2 * (n + 1)) div 2" by simp
  also have " = (Suc n * (Suc n + 1)) div 2" by simp
  finally show ?case .
qed

text ‹This demonstrates how induction proofs can be done without
  having to consider the raw Natural Deduction structure.›


subsection ‹Induction with local parameters and premises›

text ‹Idea: Pure rule statements are passed through the induction
  rule.  This achieves convenient proof patterns, thanks to some
  internal trickery in the @{method induct} method.

  Important: Using compact HOL formulae with ‹∀/⟶› is a
  well-known anti-pattern! It would produce useless formal noise.
›

notepad
begin
  fix n :: nat
  fix P :: "nat ⇒ bool"
  fix Q :: "'a ⇒ nat ⇒ bool"

  have "P n"
  proof (induct n)
    case 0
    show "P 0" \<proof>
  next
    case (Suc n)
    from ‹P n› show "P (Suc n)" \<proof>
  qed

  have "A n  P n"
  proof (induct n)
    case 0
    from ‹A 0› show "P 0" \<proof>
  next
    case (Suc n)
    from ‹A n ⟹ P n›
      and ‹A (Suc n)› show "P (Suc n)" \<proof>
  qed

  have "x. Q x n"
  proof (induct n)
    case 0
    show "Q x 0" \<proof>
  next
    case (Suc n)
    from ‹⋀x. Q x n› show "Q x (Suc n)" \<proof>
    txt ‹Local quantification admits arbitrary instances:›
    note ‹Q a n› and ‹Q b n›
  qed
end


subsection ‹Implicit induction context›

text ‹The @{method induct} method can isolate local parameters and
  premises directly from the given statement.  This is convenient in
  practical applications, but requires some understanding of what is
  going on internally (as explained above).›

notepad
begin
  fix n :: nat
  fix Q :: "'a ⇒ nat ⇒ bool"

  fix x :: 'a
  assume "A x n"
  then have "Q x n"
  proof (induct n arbitrary: x)
    case 0
    from ‹A x 0› show "Q x 0" \<proof>
  next
    case (Suc n)
    from ‹⋀x. A x n ⟹ Q x n›  ― ‹arbitrary instances can be produced here›
      and ‹A x (Suc n)› show "Q x (Suc n)" \<proof>
  qed
end


subsection ‹Advanced induction with term definitions›

text ‹Induction over subexpressions of a certain shape is delicate
  to formalize.  The Isar @{method induct} method provides
  infrastructure for this.

  Idea: sub-expressions of the problem are turned into a defined
  induction variable, often accompanied by fixing of auxiliary
  parameters in the original expression.›

notepad
begin
  fix a :: "'a ⇒ nat"
  fix A :: "nat ⇒ bool"

  assume "A (a x)"
  then have "P (a x)"
  proof (induct "a x" arbitrary: x)
    case 0
    note prem = ‹A (a x)›
      and defn = ‹0 = a x›
    show "P (a x)" \<proof>
  next
    case (Suc n)
    note hyp = ‹⋀x. n = a x ⟹ A (a x) ⟹ P (a x)›
      and prem = ‹A (a x)›
      and defn = ‹Suc n = a x›
    show "P (a x)" \<proof>
  qed
end


section ‹Natural Deduction \label{sec:natural-deduction-synopsis}›

subsection ‹Rule statements›

text ‹
  Isabelle/Pure ``theorems'' are always natural deduction rules,
  which sometimes happen to consist of a conclusion only.

  The framework connectives ‹⋀› and ‹⟹› indicate the
  rule structure declaratively.  For example:›

thm conjI
thm impI
thm nat.induct

text ‹
  The object-logic is embedded into the Pure framework via an implicit
  derivability judgment \<^term>‹Trueprop :: bool ⇒ prop›.

  Thus any HOL formula appears atomic to the Pure framework, while
  the rule structure outlines the corresponding proof pattern.

  This can be made explicit as follows:
›


notepad
begin
  write Trueprop  ("Tr")

  thm conjI
  thm impI
  thm nat.induct
end

text ‹
  Isar provides first-class notation for rule statements as follows.
›

print_statement conjI
print_statement impI
print_statement nat.induct


subsubsection ‹Examples›

text ‹
  Introductions and eliminations of some standard connectives of
  the object-logic can be written as rule statements as follows.  (The
  proof ``@{command "by"}~@{method blast}'' serves as sanity check.)
›

lemma "(P  False)  ¬ P" by blast
lemma "¬ P  P  Q" by blast

lemma "P  Q  P  Q" by blast
lemma "P  Q  (P  Q  R)  R" by blast

lemma "P  P  Q" by blast
lemma "Q  P  Q" by blast
lemma "P  Q  (P  R)  (Q  R)  R" by blast

lemma "(x. P x)  (x. P x)" by blast
lemma "(x. P x)  P x" by blast

lemma "P x  (x. P x)" by blast
lemma "(x. P x)  (x. P x  R)  R" by blast

lemma "x  A  x  B  x  A  B" by blast
lemma "x  A  B  (x  A  x  B  R)  R" by blast

lemma "x  A  x  A  B" by blast
lemma "x  B  x  A  B" by blast
lemma "x  A  B  (x  A  R)  (x  B  R)  R" by blast


subsection ‹Isar context elements›

text ‹We derive some results out of the blue, using Isar context
  elements and some explicit blocks.  This illustrates their meaning
  wrt.\ Pure connectives, without goal states getting in the way.›

notepad
begin
  {
    fix x
    have "B x" \<proof>
  }
  have "x. B x" by fact

next

  {
    assume A
    have B \<proof>
  }
  have "A  B" by fact

next

  {
    define x where "x = t"
    have "B x" \<proof>
  }
  have "B t" by fact

next

  {
    obtain x :: 'a where "B x" \<proof>
    have C \<proof>
  }
  have C by fact

end


subsection ‹Pure rule composition›

text ‹
  The Pure framework provides means for:

  ▪ backward-chaining of rules by @{inference resolution}

  ▪ closing of branches by @{inference assumption}


  Both principles involve higher-order unification of ‹λ›-terms
  modulo ‹αβη›-equivalence (cf.\ Huet and Miller).
›

notepad
begin
  assume a: A and b: B
  thm conjI
  thm conjI [of A B]  ― ‹instantiation›
  thm conjI [of A B, OF a b]  ― ‹instantiation and composition›
  thm conjI [OF a b]  ― ‹composition via unification (trivial)›
  thm conjI [OF ‹A› ‹B›]

  thm conjI [OF disjI1]
end

text ‹Note: Low-level rule composition is tedious and leads to
  unreadable~/ unmaintainable expressions in the text.›


subsection ‹Structured backward reasoning›

text ‹Idea: Canonical proof decomposition via @{command fix}~/
  @{command assume}~/ @{command show}, where the body produces a
  natural deduction rule to refine some goal.›

notepad
begin
  fix A B :: "'a ⇒ bool"

  have "⋀x. A x ⟹ B x"
  proof -
    fix x
    assume "A x"
    show "B x" \<proof>
  qed

  have "x. A x  B x"
  proof -
    {
      fix x
      assume "A x"
      show "B x" \<proof>
    } ― ‹implicit block structure made explicit›
    note ‹⋀x. A x ⟹ B x›
      ― ‹side exit for the resulting rule›
  qed
end


subsection ‹Structured rule application›

text ‹
  Idea: Previous facts and new claims are composed with a rule from
  the context (or background library).
›

notepad
begin
  assume r1: "A ⟹ B ⟹ C"  ― ‹simple rule (Horn clause)›

  have A \<proof>  ― ‹prefix of facts via outer sub-proof›
  then have C
  proof (rule r1)
    show B \<proof>  ― ‹remaining rule premises via inner sub-proof›
  qed

  have C
  proof (rule r1)
    show A \<proof>
    show B \<proof>
  qed

  have A and B \<proof>
  then have C
  proof (rule r1)
  qed

  have A and B \<proof>
  then have C by (rule r1)

next

  assume r2: "A ⟹ (⋀x. B1 x ⟹ B2 x) ⟹ C"  ― ‹nested rule›

  have A \<proof>
  then have C
  proof (rule r2)
    fix x
    assume "B1 x"
    show "B2 x" \<proof>
  qed

  txt ‹The compound rule premise \<^prop>‹⋀x. B1 x ⟹ B2 x› is better
    addressed via @{command fix}~/ @{command assume}~/ @{command show}
    in the nested proof body.›
end


subsection ‹Example: predicate logic›

text ‹
  Using the above principles, standard introduction and elimination proofs
  of predicate logic connectives of HOL work as follows.
›

notepad
begin
  have "A  B" and A \<proof>
  then have B ..

  have A \<proof>
  then have "A  B" ..

  have B \<proof>
  then have "A  B" ..

  have "A  B" \<proof>
  then have C
  proof
    assume A
    then show C \<proof>
  next
    assume B
    then show C \<proof>
  qed

  have A and B \<proof>
  then have "A  B" ..

  have "A  B" \<proof>
  then have A ..

  have "A  B" \<proof>
  then have B ..

  have False \<proof>
  then have A ..

  have True ..

  have "¬ A"
  proof
    assume A
    then show False \<proof>
  qed

  have "¬ A" and A \<proof>
  then have B ..

  have "x. P x"
  proof
    fix x
    show "P x" \<proof>
  qed

  have "x. P x" \<proof>
  then have "P a" ..

  have "x. P x"
  proof
    show "P a" \<proof>
  qed

  have "x. P x" \<proof>
  then have C
  proof
    fix a
    assume "P a"
    show C \<proof>
  qed

  txt ‹Less awkward version using @{command obtain}:›
  have "∃x. P x" \<proof>
  then obtain a where "P a" ..
end

text ‹Further variations to illustrate Isar sub-proofs involving
  @{command show}:›

notepad
begin
  have "A  B"
  proof  ― ‹two strictly isolated subproofs
    show A \<proof>
  next
    show B \<proof>
  qed

  have "A  B"
  proof  ― ‹one simultaneous sub-proof
    show A and B \<proof>
  qed

  have "A  B"
  proof  ― ‹two subproofs in the same context
    show A \<proof>
    show B \<proof>
  qed

  have "A  B"
  proof  ― ‹swapped order
    show B \<proof>
    show A \<proof>
  qed

  have "A  B"
  proof  ― ‹sequential subproofs
    show A \<proof>
    show B using A \<proof>
  qed
end


subsubsection ‹Example: set-theoretic operators›

text ‹There is nothing special about logical connectives (‹∧›, ‹∨›, ‹∀›, ‹∃›
  etc.).  Operators from set-theory or lattice-theory work analogously.  It
  is only a matter of rule declarations in the library; rules can also be
  specified explicitly.
›

notepad
begin
  have "x  A" and "x  B" \<proof>
  then have "x  A  B" ..

  have "x  A" \<proof>
  then have "x  A  B" ..

  have "x  B" \<proof>
  then have "x  A  B" ..

  have "x  A  B" \<proof>
  then have C
  proof
    assume "x  A"
    then show C \<proof>
  next
    assume "x  B"
    then show C \<proof>
  qed

next
  have "x  A"
  proof
    fix a
    assume "a  A"
    show "x  a" \<proof>
  qed

  have "x  A" \<proof>
  then have "x  a"
  proof
    show "a  A" \<proof>
  qed

  have "a  A" and "x  a" \<proof>
  then have "x  A" ..

  have "x  A" \<proof>
  then obtain a where "a  A" and "x  a" ..
end


section ‹Generalized elimination and cases›

subsection ‹General elimination rules›

text ‹
  The general format of elimination rules is illustrated by the
  following typical representatives:
›

thm exE     ― ‹local parameter›
thm conjE   ― ‹local premises›
thm disjE   ― ‹split into cases›

text ‹
  Combining these characteristics leads to the following general scheme
  for elimination rules with cases:

  ▪ prefix of assumptions (or ``major premises'')

  ▪ one or more cases that enable establishing the main conclusion
  in an augmented context
›

notepad
begin
  assume r:
    "A1  A2   ― ‹assumptions
      (x y. B1 x y  C1 x y  R)   ― ‹case 1
      (x y. B2 x y  C2 x y  R)   ― ‹case 2
      R  ― ‹main conclusion"

  have A1 and A2 \<proof>
  then have R
  proof (rule r)
    fix x y
    assume "B1 x y" and "C1 x y"
    show ?thesis \<proof>
  next
    fix x y
    assume "B2 x y" and "C2 x y"
    show ?thesis \<proof>
  qed
end

text ‹Here ‹?thesis› is used to refer to the unchanged goal
  statement.›


subsection ‹Rules with cases›

text ‹
  Applying an elimination rule to some goal leaves it unchanged, but
  allows the context to be augmented in the sub-proof of each case.

  Isar provides some infrastructure to support this:

  ▪ native language elements to state eliminations

  ▪ symbolic case names

  ▪ method @{method cases} to recover this structure in a
  sub-proof
›

print_statement exE
print_statement conjE
print_statement disjE

lemma
  assumes A1 and A2  ― ‹assumptions›
  obtains
    (case1)  x y where "B1 x y" and "C1 x y"
  | (case2)  x y where "B2 x y" and "C2 x y"
  \<proof>


subsubsection ‹Example›

lemma tertium_non_datur:
  obtains
    (T)  A
  | (F)  "¬ A"
  by blast

notepad
begin
  fix x y :: 'a
  have C
  proof (cases "x = y" rule: tertium_non_datur)
    case T
    from ‹x = y› show ?thesis \<proof>
  next
    case F
    from ‹x ≠ y› show ?thesis \<proof>
  qed
end


subsubsection ‹Example›

text ‹
  Isabelle/HOL specification mechanisms (datatype, inductive, etc.)
  provide suitable derived cases rules.
›

datatype foo = Foo | Bar foo

notepad
begin
  fix x :: foo
  have C
  proof (cases x)
    case Foo
    from ‹x = Foo› show ?thesis \<proof>
  next
    case (Bar a)
    from ‹x = Bar a› show ?thesis \<proof>
  qed
end


subsection ‹Elimination statements and case-splitting›

text ‹
  The @{command consider} command states rules for generalized elimination and
  case splitting.  This is like a toplevel statement ‹theorem obtains› used
  within a proof body; or like a multi-branch ‹obtain› without activation of
  the local context elements yet.

  The proof method @{method cases} is able to use such rules with
  forward-chaining (e.g.\ via ‹then›).  This leads to the subsequent pattern
  for case-splitting in a particular situation within a proof.
›

notepad
begin
  consider (a) A | (b) B | (c) C
    \<proof>  ― ‹typically ‹by auto›, ‹by blast› etc.›
  then have something
  proof cases
    case a
    then show ?thesis \<proof>
  next
    case b
    then show ?thesis \<proof>
  next
    case c
    then show ?thesis \<proof>
  qed
end

subsection ‹Obtaining local contexts›

text ‹A single ``case'' branch may be inlined into Isar proof text
  via @{command obtain}.  This proves \<^prop>‹(⋀x. B x ⟹ thesis) ⟹
  thesis› on the spot, and augments the context afterwards.›

notepad
begin
  fix B :: "'a ⇒ bool"

  obtain x where "B x" \<proof>
  note ‹B x›

  txt ‹Conclusions from this context may not mention \<^term>‹x› again!›
  {
    obtain x where "B x" \<proof>
    from ‹B x› have C \<proof>
  }
  note ‹C›
end

end