(** * Imp: Simple Imperative Programs *)
(** In this chapter, we begin a new direction that will continue for
the rest of the course. Up to now most of our attention has been
focused on various aspects of Coq itself, while from now on we'll
mostly be using Coq to formalize other things. (We'll continue to
pause from time to time to introduce a few additional aspects of
Coq.)
Our first case study is a _simple imperative programming language_
called Imp, embodying a tiny core fragment of conventional
mainstream languages such as C and Java. Here is a familiar
mathematical function written in Imp.

       Z ::= X;;
       Y ::= 1;;
       WHILE not (Z = 0) DO
         Y ::= Y * Z;;
         Z ::= Z - 1
       END
*)
(** This chapter looks at how to define the _syntax_ and _semantics_
of Imp; the chapters that follow develop a theory of _program
equivalence_ and introduce _Hoare Logic_, a widely used logic for
reasoning about imperative programs. *)
(* ####################################################### *)
(** *** SfLib *)
(** A minor technical point: Instead of asking Coq to import our
earlier definitions from chapter [Logic], we import a small library
called [SfLib.v], containing just a few definitions and theorems
from earlier chapters that we'll actually use in the rest of the
course. This change should be nearly invisible, since most of what's
missing from SfLib has identical definitions in the Coq standard
library. The main reason for doing it is to tidy the global Coq
environment so that, for example, it is easier to search for
relevant theorems. *)
Require Export SfLib.
(* ####################################################### *)
(** * Arithmetic and Boolean Expressions *)
(** We'll present Imp in three parts: first a core language of
_arithmetic and boolean expressions_, then an extension of these
expressions with _variables_, and finally a language of _commands_
including assignment, conditionals, sequencing, and loops. *)
(* ####################################################### *)
(** ** Syntax *)
Module AExp.
(** These two definitions specify the _abstract syntax_ of
arithmetic and boolean expressions. *)
Inductive aexp : Type :=
| ANum : nat -> aexp
| APlus : aexp -> aexp -> aexp
| AMinus : aexp -> aexp -> aexp
| AMult : aexp -> aexp -> aexp.
Inductive bexp : Type :=
| BTrue : bexp
| BFalse : bexp
| BEq : aexp -> aexp -> bexp
| BLe : aexp -> aexp -> bexp
| BNot : bexp -> bexp
| BAnd : bexp -> bexp -> bexp.
(** In this chapter, we'll elide the translation from the
concrete syntax that a programmer would actually write to these
abstract syntax trees -- the process that, for example, would
translate the string ["1+2*3"] to the AST [APlus (ANum
1) (AMult (ANum 2) (ANum 3))]. The optional chapter [ImpParser]
develops a simple implementation of a lexical analyzer and parser
that can perform this translation. You do _not_ need to
understand that file to understand this one, but if you haven't
taken a course where these techniques are covered (e.g., a
compilers course) you may want to skim it. *)
(** *** *)
(** For comparison, here's a conventional BNF (Backus-Naur Form)
grammar defining the same abstract syntax:

      a ::= nat
          | a + a
          | a - a
          | a * a

      b ::= true
          | false
          | a = a
          | a <= a
          | not b
          | b and b
*)
(** Compared to the Coq version above...
- The BNF is more informal -- for example, it gives some
suggestions about the surface syntax of expressions (like the
fact that the addition operation is written [+] and is an
infix symbol) while leaving other aspects of lexical analysis
and parsing (like the relative precedence of [+], [-], and
[*]) unspecified. Some additional information -- and human
intelligence -- would be required to turn this description
into a formal definition (when implementing a compiler, for
example).
The Coq version consistently omits all this information and
concentrates on the abstract syntax only.
- On the other hand, the BNF version is lighter and
easier to read. Its informality makes it flexible, which is
a huge advantage in situations like discussions at the
blackboard, where conveying general ideas is more important
than getting every detail nailed down precisely.
Indeed, there are dozens of BNF-like notations and people
switch freely among them, usually without bothering to say which
form of BNF they're using because there is no need to: a
rough-and-ready informal understanding is all that's
needed. *)
(** It's good to be comfortable with both sorts of notations:
informal ones for communicating between humans and formal ones for
carrying out implementations and proofs. *)
(* ####################################################### *)
(** ** Evaluation *)
(** _Evaluating_ an arithmetic expression produces a number. *)
Fixpoint aeval (a : aexp) : nat :=
match a with
| ANum n => n
| APlus a1 a2 => (aeval a1) + (aeval a2)
| AMinus a1 a2 => (aeval a1) - (aeval a2)
| AMult a1 a2 => (aeval a1) * (aeval a2)
end.
Example test_aeval1:
aeval (APlus (ANum 2) (ANum 2)) = 4.
Proof. reflexivity. Qed.
(** *** *)
(** Similarly, evaluating a boolean expression yields a boolean. *)
Fixpoint beval (b : bexp) : bool :=
match b with
| BTrue => true
| BFalse => false
| BEq a1 a2 => beq_nat (aeval a1) (aeval a2)
| BLe a1 a2 => ble_nat (aeval a1) (aeval a2)
| BNot b1 => negb (beval b1)
| BAnd b1 b2 => andb (beval b1) (beval b2)
end.
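(** As with [aeval], we can sanity-check [beval] on a small test in the
    same style as [test_aeval1] (the test below is an illustrative
    addition of our own): *)
Example test_beval_ex:
  (* illustrative test, not part of the original development *)
  beval (BLe (ANum 2) (ANum 3)) = true.
Proof. reflexivity. Qed.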
(* ####################################################### *)
(** ** Optimization *)
(** We haven't defined very much yet, but we can already get
some mileage out of the definitions. Suppose we define a function
that takes an arithmetic expression and slightly simplifies it,
changing every occurrence of [0+e] (i.e., [APlus (ANum 0) e])
into just [e]. *)
Fixpoint optimize_0plus (a:aexp) : aexp :=
match a with
| ANum n =>
ANum n
| APlus (ANum 0) e2 =>
optimize_0plus e2
| APlus e1 e2 =>
APlus (optimize_0plus e1) (optimize_0plus e2)
| AMinus e1 e2 =>
AMinus (optimize_0plus e1) (optimize_0plus e2)
| AMult e1 e2 =>
AMult (optimize_0plus e1) (optimize_0plus e2)
end.
(** To make sure our optimization is doing the right thing we
can test it on some examples and see if the output looks OK. *)
Example test_optimize_0plus:
optimize_0plus (APlus (ANum 2)
(APlus (ANum 0)
(APlus (ANum 0) (ANum 1))))
= APlus (ANum 2) (ANum 1).
Proof. reflexivity. Qed.
(** But if we want to be sure the optimization is correct --
i.e., that evaluating an optimized expression gives the same
result as the original -- we should prove it. *)
Theorem optimize_0plus_sound: forall a,
aeval (optimize_0plus a) = aeval a.
Proof.
intros a. induction a.
Case "ANum". reflexivity.
Case "APlus". destruct a1.
SCase "a1 = ANum n". destruct n.
SSCase "n = 0". simpl. apply IHa2.
SSCase "n <> 0". simpl. rewrite IHa2. reflexivity.
SCase "a1 = APlus a1_1 a1_2".
simpl. simpl in IHa1. rewrite IHa1.
rewrite IHa2. reflexivity.
SCase "a1 = AMinus a1_1 a1_2".
simpl. simpl in IHa1. rewrite IHa1.
rewrite IHa2. reflexivity.
SCase "a1 = AMult a1_1 a1_2".
simpl. simpl in IHa1. rewrite IHa1.
rewrite IHa2. reflexivity.
Case "AMinus".
simpl. rewrite IHa1. rewrite IHa2. reflexivity.
Case "AMult".
simpl. rewrite IHa1. rewrite IHa2. reflexivity. Qed.
(* ####################################################### *)
(** * Coq Automation *)
(** The repetition in this last proof is starting to be a little
annoying. If either the language of arithmetic expressions or the
optimization being proved sound were significantly more complex,
it would begin to be a real problem.
So far, we've been doing all our proofs using just a small handful
of Coq's tactics and completely ignoring its powerful facilities
for constructing parts of proofs automatically. This section
introduces some of these facilities, and we will see more over the
next several chapters. Getting used to them will take some
energy -- Coq's automation is a power tool -- but it will allow us
to scale up our efforts to more complex definitions and more
interesting properties without becoming overwhelmed by boring,
repetitive, low-level details. *)
(* ####################################################### *)
(** ** Tacticals *)
(** _Tacticals_ is Coq's term for tactics that take other tactics as
arguments -- "higher-order tactics," if you will. *)
(* ####################################################### *)
(** *** The [repeat] Tactical *)
(** The [repeat] tactical takes another tactic and keeps applying
this tactic until the tactic fails. Here is an example showing
that [100] is even using repeat. *)
Theorem ev100 : ev 100.
Proof.
repeat (apply ev_SS). (* applies ev_SS 50 times,
until [apply ev_SS] fails *)
apply ev_0.
Qed.
(* Print ev100. *)
(** The [repeat T] tactic never fails; if the tactic [T] doesn't apply
to the original goal, then repeat still succeeds without changing
the original goal (it repeats zero times). *)
Theorem ev100' : ev 100.
Proof.
repeat (apply ev_0). (* doesn't fail, applies ev_0 zero times *)
repeat (apply ev_SS). apply ev_0. (* we can continue the proof *)
Qed.
(** The [repeat T] tactic does not have any bound on the number of
times it applies [T]. If [T] is a tactic that always succeeds then
repeat [T] will loop forever (e.g. [repeat simpl] loops forever
since [simpl] always succeeds). While Coq's term language is
guaranteed to terminate, Coq's tactic language is not! *)
(* ####################################################### *)
(** *** The [try] Tactical *)
(** If [T] is a tactic, then [try T] is a tactic that is just like [T]
except that, if [T] fails, [try T] _successfully_ does nothing at
all (instead of failing). *)
Theorem silly1 : forall ae, aeval ae = aeval ae.
Proof. try reflexivity. (* this just does [reflexivity] *) Qed.
Theorem silly2 : forall (P : Prop), P -> P.
Proof.
intros P HP.
try reflexivity. (* just [reflexivity] would have failed *)
apply HP. (* we can still finish the proof in some other way *)
Qed.
(** Using [try] in a completely manual proof is a bit silly, but
we'll see below that [try] is very useful for doing automated
proofs in conjunction with the [;] tactical. *)
(* ####################################################### *)
(** *** The [;] Tactical (Simple Form) *)
(** In its most commonly used form, the [;] tactical takes two tactics
as argument: [T;T'] first performs the tactic [T] and then
performs the tactic [T'] on _each subgoal_ generated by [T]. *)
(** For example, consider the following trivial lemma: *)
Lemma foo : forall n, ble_nat 0 n = true.
Proof.
intros.
destruct n.
(* Leaves two subgoals, which are discharged identically... *)
Case "n=0". simpl. reflexivity.
Case "n=Sn'". simpl. reflexivity.
Qed.
(** We can simplify this proof using the [;] tactical: *)
Lemma foo' : forall n, ble_nat 0 n = true.
Proof.
intros.
destruct n; (* [destruct] the current goal *)
simpl; (* then [simpl] each resulting subgoal *)
reflexivity. (* and do [reflexivity] on each resulting subgoal *)
Qed.
(** Using [try] and [;] together, we can get rid of the repetition in
the proof that was bothering us a little while ago. *)
Theorem optimize_0plus_sound': forall a,
aeval (optimize_0plus a) = aeval a.
Proof.
intros a.
induction a;
(* Most cases follow directly by the IH *)
try (simpl; rewrite IHa1; rewrite IHa2; reflexivity).
(* The remaining cases -- ANum and APlus -- are different *)
Case "ANum". reflexivity.
Case "APlus".
destruct a1;
(* Again, most cases follow directly by the IH *)
try (simpl; simpl in IHa1; rewrite IHa1;
rewrite IHa2; reflexivity).
(* The interesting case, on which the [try...] does nothing,
is when [e1 = ANum n]. In this case, we have to destruct
[n] (to see whether the optimization applies) and rewrite
with the induction hypothesis. *)
SCase "a1 = ANum n". destruct n;
simpl; rewrite IHa2; reflexivity. Qed.
(** Coq experts often use this "[...; try... ]" idiom after a tactic
like [induction] to take care of many similar cases all at once.
Naturally, this practice has an analog in informal proofs.
Here is an informal proof of this theorem that matches the
structure of the formal one:
_Theorem_: For all arithmetic expressions [a],
aeval (optimize_0plus a) = aeval a.
_Proof_: By induction on [a]. The [AMinus] and [AMult] cases
follow directly from the IH. The remaining cases are as follows:
- Suppose [a = ANum n] for some [n]. We must show
aeval (optimize_0plus (ANum n)) = aeval (ANum n).
This is immediate from the definition of [optimize_0plus].
- Suppose [a = APlus a1 a2] for some [a1] and [a2]. We
must show
aeval (optimize_0plus (APlus a1 a2))
= aeval (APlus a1 a2).
Consider the possible forms of [a1]. For most of them,
[optimize_0plus] simply calls itself recursively for the
subexpressions and rebuilds a new expression of the same form
as [a1]; in these cases, the result follows directly from the
IH.
The interesting case is when [a1 = ANum n] for some [n].
If [n = 0], then
optimize_0plus (APlus a1 a2) = optimize_0plus a2
and the IH for [a2] is exactly what we need. On the other
hand, if [n = S n'] for some [n'], then again [optimize_0plus]
simply calls itself recursively, and the result follows from
the IH. [] *)
(** This proof can still be improved: the first case (for [a = ANum
n]) is very trivial -- even more trivial than the cases that we
said simply followed from the IH -- yet we have chosen to write it
out in full. It would be better and clearer to drop it and just
say, at the top, "Most cases are either immediate or direct from
the IH. The only interesting case is the one for [APlus]..." We
can make the same improvement in our formal proof too. Here's how
it looks: *)
Theorem optimize_0plus_sound'': forall a,
aeval (optimize_0plus a) = aeval a.
Proof.
intros a.
induction a;
(* Most cases follow directly by the IH *)
try (simpl; rewrite IHa1; rewrite IHa2; reflexivity);
(* ... or are immediate by definition *)
try reflexivity.
(* The interesting case is when a = APlus a1 a2. *)
Case "APlus".
destruct a1; try (simpl; simpl in IHa1; rewrite IHa1;
rewrite IHa2; reflexivity).
SCase "a1 = ANum n". destruct n;
simpl; rewrite IHa2; reflexivity. Qed.
(* ####################################################### *)
(** *** The [;] Tactical (General Form) *)
(** The [;] tactical has a more general form than the simple [T;T'] we've
seen above, which is sometimes also useful. If [T], [T1], ...,
[Tn] are tactics, then
T; [T1 | T2 | ... | Tn]
is a tactic that first performs [T] and then performs [T1] on the
first subgoal generated by [T], performs [T2] on the second
subgoal, etc.
So [T;T'] is just special notation for the case when all of the
[Ti]'s are the same tactic; i.e. [T;T'] is just a shorthand for:
T; [T' | T' | ... | T']
*)
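(** For instance, here is a small made-up example (the lemma name below
    is purely illustrative) where the two subgoals produced by
    [destruct] call for different tactics, so we list them
    explicitly: *)
Lemma destruct_branches_ex : forall n : nat,
  n = 0 \/ ble_nat 0 n = true.
Proof.
  intros n.
  (* [destruct n] yields two subgoals; each gets its own tactic *)
  destruct n; [left; reflexivity | right; simpl; reflexivity].
Qed.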
(* ####################################################### *)
(** ** Defining New Tactic Notations *)
(** Coq also provides several ways of "programming" tactic scripts.
- The [Tactic Notation] idiom illustrated below gives a handy
way to define "shorthand tactics" that bundle several tactics
into a single command.
- For more sophisticated programming, Coq offers a small
built-in programming language called [Ltac] with primitives
that can examine and modify the proof state. The details are
a bit too complicated to get into here (and it is generally
agreed that [Ltac] is not the most beautiful part of Coq's
design!), but they can be found in the reference manual, and
there are many examples of [Ltac] definitions in the Coq
standard library that you can use as examples.
- There is also an OCaml API, which can be used to build tactics
that access Coq's internal structures at a lower level, but
this is seldom worth the trouble for ordinary Coq users.
The [Tactic Notation] mechanism is the easiest to come to grips with,
and it offers plenty of power for many purposes. Here's an example.
*)
Tactic Notation "simpl_and_try" tactic(c) :=
simpl;
try c.
(** This defines a new tactical called [simpl_and_try] which
takes one tactic [c] as an argument, and is defined to be
equivalent to the tactic [simpl; try c]. For example, writing
"[simpl_and_try reflexivity.]" in a proof would be the same as
writing "[simpl; try reflexivity.]" *)
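(** As a quick made-up illustration (the example name is ours),
    [simpl_and_try reflexivity] finishes the following trivial goal in
    one step: *)
Example simpl_and_try_ex : forall n : nat, 0 + n = n.
Proof.
  intros n.
  simpl_and_try reflexivity.  (* same as [simpl; try reflexivity] *)
Qed.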
(** The next subsection gives a more sophisticated use of this
feature... *)
(* ####################################################### *)
(** *** Bulletproofing Case Analyses *)
(** Being able to deal with most of the cases of an [induction]
or [destruct] all at the same time is very convenient, but it can
also be a little confusing. One problem that often comes up is
that _maintaining_ proofs written in this style can be difficult.
For example, suppose that, later, we extended the definition of
[aexp] with another constructor that also required a special
argument. The above proof might break because Coq generated the
subgoals for this constructor before the one for [APlus], so that,
at the point when we start working on the [APlus] case, Coq is
actually expecting the argument for a completely different
constructor. What we'd like is to get a sensible error message
saying "I was expecting the [AFoo] case at this point, but the
proof script is talking about [APlus]." Here's a nice trick (due
to Aaron Bohannon) that smoothly achieves this. *)
Tactic Notation "aexp_cases" tactic(first) ident(c) :=
first;
[ Case_aux c "ANum" | Case_aux c "APlus"
| Case_aux c "AMinus" | Case_aux c "AMult" ].
(** ([Case_aux] implements the common functionality of [Case],
[SCase], [SSCase], etc. For example, [Case "foo"] is defined as
[Case_aux Case "foo"].) *)
(** For example, if [a] is a variable of type [aexp], then doing
aexp_cases (induction a) Case
will perform an induction on [a] (the same as if we had just typed
[induction a]) and _also_ add a [Case] tag to each subgoal
generated by the [induction], labeling which constructor it comes
from. For example, here is yet another proof of
[optimize_0plus_sound], using [aexp_cases]: *)
Theorem optimize_0plus_sound''': forall a,
aeval (optimize_0plus a) = aeval a.
Proof.
intros a.
aexp_cases (induction a) Case;
try (simpl; rewrite IHa1; rewrite IHa2; reflexivity);
try reflexivity.
(* At this point, there is already an ["APlus"] case name
in the context. The [Case "APlus"] here in the proof
text has the effect of a sanity check: if the "Case"
string in the context is anything _other_ than ["APlus"]
(for example, because we added a clause to the definition
of [aexp] and forgot to change the proof) we'll get a
helpful error at this point telling us that this is now
the wrong case. *)
Case "APlus".
aexp_cases (destruct a1) SCase;
try (simpl; simpl in IHa1;
rewrite IHa1; rewrite IHa2; reflexivity).
SCase "ANum". destruct n;
simpl; rewrite IHa2; reflexivity. Qed.
(** **** Exercise: 3 stars (optimize_0plus_b) *)
(** Since the [optimize_0plus] transformation doesn't change the value
of [aexp]s, we should be able to apply it to all the [aexp]s that
appear in a [bexp] without changing the [bexp]'s value. Write a
function which performs that transformation on [bexp]s, and prove
it is sound. Use the tacticals we've just seen to make the proof
as elegant as possible. *)
Fixpoint optimize_0plus_b (b : bexp) : bexp :=
(* FILL IN HERE *) admit.
Theorem optimize_0plus_b_sound : forall b,
beval (optimize_0plus_b b) = beval b.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 4 stars, optional (optimizer) *)
(** _Design exercise_: The optimization implemented by our
[optimize_0plus] function is only one of many imaginable
optimizations on arithmetic and boolean expressions. Write a more
sophisticated optimizer and prove it correct.
(* FILL IN HERE *)
*)
(** [] *)
(* ####################################################### *)
(** ** The [omega] Tactic *)
(** The [omega] tactic implements a decision procedure for a subset of
first-order logic called _Presburger arithmetic_. It is based on
the Omega algorithm invented in 1992 by William Pugh.
If the goal is a universally quantified formula made out of
- numeric constants, addition ([+] and [S]), subtraction ([-]
and [pred]), and multiplication by constants (this is what
makes it Presburger arithmetic),
- equality ([=] and [<>]) and inequality ([<=]), and
- the logical connectives [/\], [\/], [~], and [->],
then invoking [omega] will either solve the goal or tell you that
it is actually false. *)
Example silly_presburger_example : forall m n o p,
m + n <= n + o /\ o + 3 = p + 3 ->
m <= p.
Proof.
intros. omega.
Qed.
(** Leibniz wrote, "It is unworthy of excellent men to lose
hours like slaves in the labor of calculation which could be
relegated to anyone else if machines were used." We recommend
using the omega tactic whenever possible. *)
(* ####################################################### *)
(** ** A Few More Handy Tactics *)
(** Finally, here are some miscellaneous tactics that you may find
convenient.
- [clear H]: Delete hypothesis [H] from the context.
- [subst x]: Find an assumption [x = e] or [e = x] in the
context, replace [x] with [e] throughout the context and
current goal, and clear the assumption.
- [subst]: Substitute away _all_ assumptions of the form [x = e]
or [e = x].
- [rename... into...]: Change the name of a hypothesis in the
proof context. For example, if the context includes a variable
named [x], then [rename x into y] will change all occurrences
of [x] to [y].
- [assumption]: Try to find a hypothesis [H] in the context that
exactly matches the goal; if one is found, behave just like
[apply H].
- [contradiction]: Try to find a hypothesis [H] in the current
context that is logically equivalent to [False]. If one is
found, solve the goal.
- [constructor]: Try to find a constructor [c] (from some
[Inductive] definition in the current environment) that can be
applied to solve the current goal. If one is found, behave
like [apply c]. *)
(** We'll see many examples of these in the proofs below. *)
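(** In the meantime, here is a tiny made-up example (the name below is
    purely illustrative) in which [subst], [constructor], and
    [assumption] each do their job in turn: *)
Example handy_tactics_ex : forall n m : nat,
  n = m -> ev n -> ev (S (S m)).
Proof.
  intros n m Heq Hev.
  subst.        (* use [n = m] to identify the two variables *)
  constructor.  (* applies [ev_SS], leaving a goal of the form [ev _] *)
  assumption.   (* the hypothesis [Hev] now matches the goal exactly *)
Qed.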
(* ####################################################### *)
(** * Evaluation as a Relation *)
(** We have presented [aeval] and [beval] as functions defined by
[Fixpoints]. Another way to think about evaluation -- one that we
will see is often more flexible -- is as a _relation_ between
expressions and their values. This leads naturally to [Inductive]
definitions like the following one for arithmetic
expressions... *)
Module aevalR_first_try.
Inductive aevalR : aexp -> nat -> Prop :=
| E_ANum : forall (n: nat),
aevalR (ANum n) n
| E_APlus : forall (e1 e2: aexp) (n1 n2: nat),
aevalR e1 n1 ->
aevalR e2 n2 ->
aevalR (APlus e1 e2) (n1 + n2)
| E_AMinus: forall (e1 e2: aexp) (n1 n2: nat),
aevalR e1 n1 ->
aevalR e2 n2 ->
aevalR (AMinus e1 e2) (n1 - n2)
| E_AMult : forall (e1 e2: aexp) (n1 n2: nat),
aevalR e1 n1 ->
aevalR e2 n2 ->
aevalR (AMult e1 e2) (n1 * n2).
(** As is often the case with relations, we'll find it
convenient to define infix notation for [aevalR]. We'll write [e
|| n] to mean that arithmetic expression [e] evaluates to value
[n]. (This notation is one place where the limitation to ASCII
symbols becomes a little bothersome. The standard notation for
the evaluation relation is a double down-arrow. We'll typeset it
like this in the HTML version of the notes and use a double
vertical bar as the closest approximation in [.v] files.) *)
Notation "e '||' n" := (aevalR e n) : type_scope.
End aevalR_first_try.
(** In fact, Coq provides a way to use this notation in the definition
of [aevalR] itself. This avoids situations where we're working on
a proof involving statements in the form [e || n] but we have to
refer back to a definition written using the form [aevalR e n].
We do this by first "reserving" the notation, then giving the
definition together with a declaration of what the notation
means. *)
Reserved Notation "e '||' n" (at level 50, left associativity).
Inductive aevalR : aexp -> nat -> Prop :=
| E_ANum : forall (n:nat),
(ANum n) || n
| E_APlus : forall (e1 e2: aexp) (n1 n2 : nat),
(e1 || n1) -> (e2 || n2) -> (APlus e1 e2) || (n1 + n2)
| E_AMinus : forall (e1 e2: aexp) (n1 n2 : nat),
(e1 || n1) -> (e2 || n2) -> (AMinus e1 e2) || (n1 - n2)
| E_AMult : forall (e1 e2: aexp) (n1 n2 : nat),
(e1 || n1) -> (e2 || n2) -> (AMult e1 e2) || (n1 * n2)
where "e '||' n" := (aevalR e n) : type_scope.
Tactic Notation "aevalR_cases" tactic(first) ident(c) :=
first;
[ Case_aux c "E_ANum" | Case_aux c "E_APlus"
| Case_aux c "E_AMinus" | Case_aux c "E_AMult" ].
(* ####################################################### *)
(** ** Inference Rule Notation *)
(** In informal discussions, it is convenient to write the rules for
[aevalR] and similar relations in the more readable graphical form
of _inference rules_, where the premises above the line justify
the conclusion below the line (we have already seen them in the
Prop chapter). *)
(** For example, the constructor [E_APlus]...
| E_APlus : forall (e1 e2: aexp) (n1 n2: nat),
aevalR e1 n1 ->
aevalR e2 n2 ->
aevalR (APlus e1 e2) (n1 + n2)
...would be written like this as an inference rule:

                              e1 || n1
                              e2 || n2
                         --------------------                  (E_APlus)
                         APlus e1 e2 || n1+n2
*)
(** Formally, there is nothing very deep about inference rules:
they are just implications. You can read the rule name on the
right as the name of the constructor and read each of the
linebreaks between the premises above the line and the line itself
as [->]. All the variables mentioned in the rule ([e1], [n1],
etc.) are implicitly bound by universal quantifiers at the
beginning. (Such variables are often called _metavariables_ to
distinguish them from the variables of the language we are
defining. At the moment, our arithmetic expressions don't include
variables, but we'll soon be adding them.) The whole collection
of rules is understood as being wrapped in an [Inductive]
declaration (informally, this is either elided or else indicated
by saying something like "Let [aevalR] be the smallest relation
closed under the following rules..."). *)
(** For example, [||] is the smallest relation closed under these
rules:

                             -----------                        (E_ANum)
                             ANum n || n

                              e1 || n1
                              e2 || n2
                         --------------------                  (E_APlus)
                         APlus e1 e2 || n1+n2

                              e1 || n1
                              e2 || n2
                        ---------------------                  (E_AMinus)
                        AMinus e1 e2 || n1-n2

                              e1 || n1
                              e2 || n2
                         --------------------                  (E_AMult)
                         AMult e1 e2 || n1*n2
*)
(* ####################################################### *)
(** ** Equivalence of the Definitions *)
(** It is straightforward to prove that the relational and functional
definitions of evaluation agree on all possible arithmetic
expressions... *)
Theorem aeval_iff_aevalR : forall a n,
(a || n) <-> aeval a = n.
Proof.
split.
Case "->".
intros H.
aevalR_cases (induction H) SCase; simpl.
SCase "E_ANum".
reflexivity.
SCase "E_APlus".
rewrite IHaevalR1. rewrite IHaevalR2. reflexivity.
SCase "E_AMinus".
rewrite IHaevalR1. rewrite IHaevalR2. reflexivity.
SCase "E_AMult".
rewrite IHaevalR1. rewrite IHaevalR2. reflexivity.
Case "<-".
generalize dependent n.
aexp_cases (induction a) SCase;
simpl; intros; subst.
SCase "ANum".
apply E_ANum.
SCase "APlus".
apply E_APlus.
apply IHa1. reflexivity.
apply IHa2. reflexivity.
SCase "AMinus".
apply E_AMinus.
apply IHa1. reflexivity.
apply IHa2. reflexivity.
SCase "AMult".
apply E_AMult.
apply IHa1. reflexivity.
apply IHa2. reflexivity.
Qed.
(** Note: if you're reading the HTML file, you'll see an empty square box instead
of a proof for this theorem.
You can click on this box to "unfold" the text to see the proof.
Click on the unfolded text to "fold" it back up to a box. We'll be using
this style frequently from now on to help keep the HTML easier to read.
The full proofs always appear in the .v files. *)
(** We can make the proof quite a bit shorter by making more
use of tacticals... *)
Theorem aeval_iff_aevalR' : forall a n,
(a || n) <-> aeval a = n.
Proof.
(* WORKED IN CLASS *)
split.
Case "->".
intros H; induction H; subst; reflexivity.
Case "<-".
generalize dependent n.
induction a; simpl; intros; subst; constructor;
try apply IHa1; try apply IHa2; reflexivity.
Qed.
(** **** Exercise: 3 stars (bevalR) *)
(** Write a relation [bevalR] in the same style as
[aevalR], and prove that it is equivalent to [beval].*)
(*
Inductive bevalR:
(* FILL IN HERE *)
*)
(** [] *)
End AExp.
(* ####################################################### *)
(** ** Computational vs. Relational Definitions *)
(** For the definitions of evaluation for arithmetic and boolean
expressions, the choice of whether to use functional or relational
definitions is mainly a matter of taste. In general, Coq has
somewhat better support for working with relations. On the other
hand, in some sense function definitions carry more information,
because functions are necessarily deterministic and defined on all
arguments; for a relation we have to show these properties
explicitly if we need them. Functions also take advantage of Coq's
computation mechanism.
However, there are circumstances where relational definitions of
evaluation are preferable to functional ones. *)
Module aevalR_division.
(** For example, suppose that we wanted to extend the arithmetic
operations by considering also a division operation:*)
Inductive aexp : Type :=
| ANum : nat -> aexp
| APlus : aexp -> aexp -> aexp
| AMinus : aexp -> aexp -> aexp
| AMult : aexp -> aexp -> aexp
| ADiv : aexp -> aexp -> aexp. (* <--- new *)
(** Extending the definition of [aeval] to handle this new operation
would not be straightforward (what should we return as the result
of [ADiv (ANum 5) (ANum 0)]?). But extending [aevalR] is
straightforward. *)
Inductive aevalR : aexp -> nat -> Prop :=
| E_ANum : forall (n:nat),
(ANum n) || n
| E_APlus : forall (a1 a2: aexp) (n1 n2 : nat),
(a1 || n1) -> (a2 || n2) -> (APlus a1 a2) || (n1 + n2)
| E_AMinus : forall (a1 a2: aexp) (n1 n2 : nat),
(a1 || n1) -> (a2 || n2) -> (AMinus a1 a2) || (n1 - n2)
| E_AMult : forall (a1 a2: aexp) (n1 n2 : nat),
(a1 || n1) -> (a2 || n2) -> (AMult a1 a2) || (n1 * n2)
| E_ADiv : forall (a1 a2: aexp) (n1 n2 n3: nat),
(a1 || n1) -> (a2 || n2) -> (mult n2 n3 = n1) -> (ADiv a1 a2) || n3
where "a '||' n" := (aevalR a n) : type_scope.
End aevalR_division.
Module aevalR_extended.
(** Suppose, instead, that we want to extend the arithmetic operations
by a nondeterministic number generator [any]:*)
Inductive aexp : Type :=
| AAny : aexp (* <--- NEW *)
| ANum : nat -> aexp
| APlus : aexp -> aexp -> aexp
| AMinus : aexp -> aexp -> aexp
| AMult : aexp -> aexp -> aexp.
(** Again, extending [aeval] would be tricky (because evaluation is
_not_ a deterministic function from expressions to numbers), but
extending [aevalR] is no problem: *)
Inductive aevalR : aexp -> nat -> Prop :=
| E_Any : forall (n:nat),
AAny || n (* <--- new *)
| E_ANum : forall (n:nat),
(ANum n) || n
| E_APlus : forall (a1 a2: aexp) (n1 n2 : nat),
(a1 || n1) -> (a2 || n2) -> (APlus a1 a2) || (n1 + n2)
| E_AMinus : forall (a1 a2: aexp) (n1 n2 : nat),
(a1 || n1) -> (a2 || n2) -> (AMinus a1 a2) || (n1 - n2)
| E_AMult : forall (a1 a2: aexp) (n1 n2 : nat),
(a1 || n1) -> (a2 || n2) -> (AMult a1 a2) || (n1 * n2)
where "a '||' n" := (aevalR a n) : type_scope.
End aevalR_extended.
(** * Expressions With Variables *)
(** Let's turn our attention back to defining Imp. The next thing we
need to do is to enrich our arithmetic and boolean expressions
with variables. To keep things simple, we'll assume that all
variables are global and that they only hold numbers. *)
(* ##################################################### *)
(** ** Identifiers *)
(** To begin, we'll need to formalize _identifiers_ such as program
variables. We could use strings for this -- or, in a real
compiler, fancier structures like pointers into a symbol table.
But for simplicity let's just use natural numbers as identifiers. *)
(** (We hide this section in a module because these definitions are
actually in [SfLib], but we want to repeat them here so that we
can explain them.) *)
Module Id.
(** We define a new inductive datatype [Id] so that we won't confuse
identifiers and numbers. We use [sumbool] to define a computable
equality operator on [Id]. *)
Inductive id : Type :=
Id : nat -> id.
Theorem eq_id_dec : forall id1 id2 : id, {id1 = id2} + {id1 <> id2}.
Proof.
intros id1 id2.
destruct id1 as [n1]. destruct id2 as [n2].
destruct (eq_nat_dec n1 n2) as [Heq | Hneq].
Case "n1 = n2".
left. rewrite Heq. reflexivity.
Case "n1 <> n2".
right. intros contra. inversion contra. apply Hneq. apply H0.
Defined.
(** The following lemmas will be useful for rewriting terms involving [eq_id_dec]. *)
Lemma eq_id : forall (T:Type) x (p q:T),
(if eq_id_dec x x then p else q) = p.
Proof.
intros.
destruct (eq_id_dec x x).
Case "x = x".
reflexivity.
Case "x <> x (impossible)".
apply ex_falso_quodlibet; apply n; reflexivity. Qed.
(** **** Exercise: 1 star, optional (neq_id) *)
Lemma neq_id : forall (T:Type) x y (p q:T), x <> y ->
(if eq_id_dec x y then p else q) = q.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
End Id.
(* ####################################################### *)
(** ** States *)
(** A _state_ represents the current values of _all_ the variables at
some point in the execution of a program. *)
(** For simplicity (to avoid dealing with partial functions), we
let the state be defined for _all_ variables, even though any
given program is only going to mention a finite number of them.
The state captures all of the information stored in memory. For Imp
programs, because each variable stores only a natural number, we
can represent the state as a mapping from identifiers to [nat].
For more complex programming languages, the state might have more
structure.
*)
Definition state := id -> nat.
Definition empty_state : state :=
fun _ => 0.
Definition update (st : state) (x : id) (n : nat) : state :=
fun x' => if eq_id_dec x x' then n else st x'.
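(** As a quick sanity check (a small example of our own), looking up a
    variable immediately after updating it yields the new value: *)
Example update_lookup_ex :
  (* illustrative example; [Id 2] and [42] are arbitrary choices *)
  (update empty_state (Id 2) 42) (Id 2) = 42.
Proof. reflexivity. Qed.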
(** For proofs involving states, we'll need several simple properties
of [update]. *)
(** **** Exercise: 1 star (update_eq) *)
Theorem update_eq : forall n x st,
(update st x n) x = n.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 1 star (update_neq) *)
Theorem update_neq : forall x2 x1 n st,
x2 <> x1 ->
(update st x2 n) x1 = (st x1).
Proof.
(* FILL IN HERE *) Admitted.