消除 Coq 中案例分析生成的冗余子目标
Eliminate redundant sub-goals generated by case analysis in Coq
简单定义
(* Two singleton types, used only to give A two distinct payloads. *)
Inductive B := bb.
Inductive C := cc.
(* A sum of B and C.  Constructors written in binder style; the types
   are the same as mkA1 : B -> A and mkA2 : C -> A. *)
Inductive A :=
| mkA1 (b : B)
| mkA2 (c : C).
(* Identity on A, defined by case analysis. *)
Definition id (a : A) : A :=
  match a with
  | mkA1 x => mkA1 x
  | mkA2 y => mkA2 y
  end.
我尝试通过案例分析(destruct 策略)来做证明,比如:
(* Demonstration only: plain case analysis on both arguments yields
   2 x 2 = 4 subgoals, and the two mixed-constructor subgoals are the
   same statement up to symmetry. *)
Theorem Foo :
forall a1 a2 : A , a1 <> a2 -> id a1 <> id a2.
Proof.
destruct a1; destruct a2.
Abort.
不出所料,当前证明状态包含四个子目标,其中两个(2/4 与 3/4)本质上是等价的:
b: B
c: C
______________________________________(2/4)
mkA1 b <> mkA2 c -> id (mkA1 b) <> id (mkA2 c)
______________________________________(3/4)
mkA2 c <> mkA1 b -> id (mkA2 c) <> id (mkA1 b)
在我看来,在进行结构性案例分析时,经常出现重复的子目标。有没有一些通用的方法来删除这些重复项?
我所做的是将第二个子目标改写(massage)成与第三个相同的形式:
Focus 2;
intro; apply not_eq_sym in H; apply not_eq_sym; revert H;
Unfocus.
虽然我还是没办法要求Coq去重。现在我可以为我的第二个子目标证明一个引理,并在我的第三个子目标中重用它。但我想知道一些替代方案。
这里有一些策略自动化,可以删除重复的子目标。请注意,不仅目标必须完全匹配,上下文的顺序也必须匹配。在进行案例分析之前,您还需要先运行初始化策略。这是 Coq >= 8.5 的代码。
(* Two singleton types, used only to give A two distinct payloads. *)
Inductive B := bb.
Inductive C := cc.
(* A sum of B and C.  Constructors written in binder style; the types
   are the same as mkA1 : B -> A and mkA2 : C -> A. *)
Inductive A :=
| mkA1 (b : B)
| mkA2 (c : C).
(* Identity on A, defined by case analysis. *)
Definition id (a : A) : A :=
  match a with
  | mkA1 x => mkA1 x
  | mkA2 y => mkA2 y
  end.
(* Non-dependent pair used to chain the solutions of deduplicated goals:
   duplicate_fst holds one goal's proof term, duplicate_snd the rest of
   the chain (ending in an evar).  A dedicated record is used —
   presumably so the Ltac lazymatch patterns below only ever match this
   bookkeeping structure and never an ordinary prod. *)
Record duplicate_prod (A B : Type) := duplicate_conj { duplicate_fst : A ; duplicate_snd : B }.
(* Marker proposition: separates the reverted post-init context from the
   hypotheses that existed before start_remove_duplicates ran. *)
Definition HERE := True.
(* Initialization tactic: must run BEFORE the case analysis.  Introduces
   H___duplicates, a duplicate_prod chain whose tail is an evar; each
   unique subgoal later becomes one cell of this chain.
   NOTE(review): the transcribed version declared this tactic with an
   unused parameter `H`, yet the proof below invokes it with no argument
   (`start_remove_duplicates.`), which cannot work; the parameter is
   removed here to match the call site. *)
Ltac start_remove_duplicates :=
simple refine (let H___duplicates := @duplicate_conj _ _ I _ in _);
[ shelve | | ]; cycle 1.
(* Search the duplicate_prod chain H for a cell whose first component
   has type G, and return the corresponding projection (a proof of G).
   Recurses down the duplicate_snd spine; fails when the chain is
   exhausted without a match. *)
Ltac find_dup H G :=
lazymatch type of H with
| duplicate_prod G _ => constr:(@duplicate_fst _ _ H)
| duplicate_prod _ _ => find_dup (@duplicate_snd _ _ H) G
end.
(* Walk to the end of the duplicate_prod chain H and return its final,
   non-duplicate_prod component — the evar that will receive goals not
   yet recorded. *)
Ltac find_end H :=
lazymatch type of H with
| duplicate_prod _ _ => find_end (@duplicate_snd _ _ H)
| _ => H
end.
(* Revert hypotheses from the bottom of the context upward until H is
   reached; H itself is kept.  The `constr_eq H H'; fail 1` branch
   aborts the surrounding `repeat` exactly when the matched hypothesis
   is H. *)
Ltac revert_until H :=
repeat lazymatch goal with
| [ H' : _ |- _ ]
=> first [ constr_eq H H'; fail 1
| revert H' ]
end.
(* To be run (e.g. with `all:`) on every goal produced by the case
   analysis.  Each goal is either
   - solved with a projection already stored in the chain, when an
     identical goal (same statement AND same context order) was
     recorded before, or
   - appended as a fresh cell of the duplicate_prod chain and shelved,
     to be proved once at the end.
   `[ > tac.. ]` applies the script to each focused goal separately. *)
Ltac remove_duplicates :=
[ > lazymatch goal with
(* The chain goal itself: leave it untouched. *)
| [ |- duplicate_prod _ _ ] => idtac
| [ H : duplicate_prod _ _ |- _ ]
(* Mark the boundary with HERE, then move the post-init context into
   the goal so that duplicate goals become syntactically identical. *)
=> generalize (I : HERE);
revert_until H;
let G := match goal with |- ?G => G end in
lazymatch type of H with
(* Already recorded: reuse the stored solution. *)
| context[duplicate_prod G]
=> let lem := find_dup H G in exact lem
(* New goal: record it at the end of the chain and shelve it. *)
| _ => let lem := find_end H in
refine (@duplicate_fst _ _ lem); clear H; (* clear to work around a bug in Coq *)
shelve
end
end.. ].
(* To be run after remove_duplicates has processed every goal.  The
   first arm (applied with `..`) clears the bookkeeping hypothesis from
   each goal; the last arm splits the shelved duplicate_prod chain into
   one goal per unique subgoal, re-introducing the hypotheses that were
   reverted (intro until the HERE marker is consumed by `intros _`),
   and closes the tail evar with I when possible. *)
Ltac finish_duplicates :=
[ > lazymatch goal with
| [ H : duplicate_prod _ _ |- _ ] => clear H
end..
| repeat match goal with
| [ |- duplicate_prod _ ?e ]
=> split;
[ repeat lazymatch goal with
(* Stop introducing once the HERE marker is the head. *)
| [ |- HERE -> _ ] => fail
| _ => intro
end;
intros _
| try (is_evar e; exact I) ]
end ].
Theorem Foo :
forall a1 a2 : A , a1 <> a2 -> id a1 <> id a2.
Proof.
(* Create the evar chain BEFORE the case analysis. *)
start_remove_duplicates.
destruct a1; destruct a2.
(* Massage subgoal 2 into the exact shape of subgoal 3: both goal and
   context order must match for deduplication to fire. *)
2:intro; apply not_eq_sym in H; apply not_eq_sym; revert c b H; intros c b.
all:remove_duplicates.
all:finish_duplicates.
(* NOTE(review): the script stops here without Qed/Abort — the
   remaining deduplicated goals still have to be proved; presumably the
   original answer was truncated. *)
我们的想法是:您首先创建一个 evar,用来保存各个独特目标的解。然后进行案例分析。接着逐一处理这些子目标:要么用来自该 evar 的新投影来解决它们,要么——如果发现当前目标已经有解——直接复用那个解。最后,将 evar 拆分成多个(去重后的)目标。此外还有一些样板代码,用于 revert 那些在创建 evar 时尚不存在的假设(以满足良类型项的变量作用域要求)、记住哪些假设原本来自上下文,并在最后把它们重新引入上下文。
简单定义
(* Two singleton types, used only to give A two distinct payloads. *)
Inductive B := bb.
Inductive C := cc.
(* A sum of B and C.  Constructors written in binder style; the types
   are the same as mkA1 : B -> A and mkA2 : C -> A. *)
Inductive A :=
| mkA1 (b : B)
| mkA2 (c : C).
(* Identity on A, defined by case analysis. *)
Definition id (a : A) : A :=
  match a with
  | mkA1 x => mkA1 x
  | mkA2 y => mkA2 y
  end.
我尝试通过案例分析(destruct 策略)来做证明,比如:
(* Demonstration only: plain case analysis on both arguments yields
   2 x 2 = 4 subgoals, and the two mixed-constructor subgoals are the
   same statement up to symmetry. *)
Theorem Foo :
forall a1 a2 : A , a1 <> a2 -> id a1 <> id a2.
Proof.
destruct a1; destruct a2.
Abort.
不出所料,当前证明状态包含四个子目标,其中两个(2/4 与 3/4)本质上是等价的:
b: B
c: C
______________________________________(2/4)
mkA1 b <> mkA2 c -> id (mkA1 b) <> id (mkA2 c)
______________________________________(3/4)
mkA2 c <> mkA1 b -> id (mkA2 c) <> id (mkA1 b)
在我看来,在进行结构性案例分析时,经常出现重复的子目标。有没有一些通用的方法来删除这些重复项?
我所做的是将第二个子目标改写(massage)成与第三个相同的形式:
Focus 2;
intro; apply not_eq_sym in H; apply not_eq_sym; revert H;
Unfocus.
虽然我还是没办法要求Coq去重。现在我可以为我的第二个子目标证明一个引理,并在我的第三个子目标中重用它。但我想知道一些替代方案。
这里有一些策略自动化,可以删除重复的子目标。请注意,不仅目标必须完全匹配,上下文的顺序也必须匹配。在进行案例分析之前,您还需要先运行初始化策略。这是 Coq >= 8.5 的代码。
(* Two singleton types, used only to give A two distinct payloads. *)
Inductive B := bb.
Inductive C := cc.
(* A sum of B and C.  Constructors written in binder style; the types
   are the same as mkA1 : B -> A and mkA2 : C -> A. *)
Inductive A :=
| mkA1 (b : B)
| mkA2 (c : C).
(* Identity on A, defined by case analysis. *)
Definition id (a : A) : A :=
  match a with
  | mkA1 x => mkA1 x
  | mkA2 y => mkA2 y
  end.
(* Non-dependent pair used to chain the solutions of deduplicated goals:
   duplicate_fst holds one goal's proof term, duplicate_snd the rest of
   the chain (ending in an evar).  A dedicated record is used —
   presumably so the Ltac lazymatch patterns below only ever match this
   bookkeeping structure and never an ordinary prod. *)
Record duplicate_prod (A B : Type) := duplicate_conj { duplicate_fst : A ; duplicate_snd : B }.
(* Marker proposition: separates the reverted post-init context from the
   hypotheses that existed before start_remove_duplicates ran. *)
Definition HERE := True.
(* Initialization tactic: must run BEFORE the case analysis.  Introduces
   H___duplicates, a duplicate_prod chain whose tail is an evar; each
   unique subgoal later becomes one cell of this chain.
   NOTE(review): the transcribed version declared this tactic with an
   unused parameter `H`, yet the proof below invokes it with no argument
   (`start_remove_duplicates.`), which cannot work; the parameter is
   removed here to match the call site. *)
Ltac start_remove_duplicates :=
simple refine (let H___duplicates := @duplicate_conj _ _ I _ in _);
[ shelve | | ]; cycle 1.
(* Search the duplicate_prod chain H for a cell whose first component
   has type G, and return the corresponding projection (a proof of G).
   Recurses down the duplicate_snd spine; fails when the chain is
   exhausted without a match. *)
Ltac find_dup H G :=
lazymatch type of H with
| duplicate_prod G _ => constr:(@duplicate_fst _ _ H)
| duplicate_prod _ _ => find_dup (@duplicate_snd _ _ H) G
end.
(* Walk to the end of the duplicate_prod chain H and return its final,
   non-duplicate_prod component — the evar that will receive goals not
   yet recorded. *)
Ltac find_end H :=
lazymatch type of H with
| duplicate_prod _ _ => find_end (@duplicate_snd _ _ H)
| _ => H
end.
(* Revert hypotheses from the bottom of the context upward until H is
   reached; H itself is kept.  The `constr_eq H H'; fail 1` branch
   aborts the surrounding `repeat` exactly when the matched hypothesis
   is H. *)
Ltac revert_until H :=
repeat lazymatch goal with
| [ H' : _ |- _ ]
=> first [ constr_eq H H'; fail 1
| revert H' ]
end.
(* To be run (e.g. with `all:`) on every goal produced by the case
   analysis.  Each goal is either
   - solved with a projection already stored in the chain, when an
     identical goal (same statement AND same context order) was
     recorded before, or
   - appended as a fresh cell of the duplicate_prod chain and shelved,
     to be proved once at the end.
   `[ > tac.. ]` applies the script to each focused goal separately. *)
Ltac remove_duplicates :=
[ > lazymatch goal with
(* The chain goal itself: leave it untouched. *)
| [ |- duplicate_prod _ _ ] => idtac
| [ H : duplicate_prod _ _ |- _ ]
(* Mark the boundary with HERE, then move the post-init context into
   the goal so that duplicate goals become syntactically identical. *)
=> generalize (I : HERE);
revert_until H;
let G := match goal with |- ?G => G end in
lazymatch type of H with
(* Already recorded: reuse the stored solution. *)
| context[duplicate_prod G]
=> let lem := find_dup H G in exact lem
(* New goal: record it at the end of the chain and shelve it. *)
| _ => let lem := find_end H in
refine (@duplicate_fst _ _ lem); clear H; (* clear to work around a bug in Coq *)
shelve
end
end.. ].
(* To be run after remove_duplicates has processed every goal.  The
   first arm (applied with `..`) clears the bookkeeping hypothesis from
   each goal; the last arm splits the shelved duplicate_prod chain into
   one goal per unique subgoal, re-introducing the hypotheses that were
   reverted (intro until the HERE marker is consumed by `intros _`),
   and closes the tail evar with I when possible. *)
Ltac finish_duplicates :=
[ > lazymatch goal with
| [ H : duplicate_prod _ _ |- _ ] => clear H
end..
| repeat match goal with
| [ |- duplicate_prod _ ?e ]
=> split;
[ repeat lazymatch goal with
(* Stop introducing once the HERE marker is the head. *)
| [ |- HERE -> _ ] => fail
| _ => intro
end;
intros _
| try (is_evar e; exact I) ]
end ].
Theorem Foo :
forall a1 a2 : A , a1 <> a2 -> id a1 <> id a2.
Proof.
(* Create the evar chain BEFORE the case analysis. *)
start_remove_duplicates.
destruct a1; destruct a2.
(* Massage subgoal 2 into the exact shape of subgoal 3: both goal and
   context order must match for deduplication to fire. *)
2:intro; apply not_eq_sym in H; apply not_eq_sym; revert c b H; intros c b.
all:remove_duplicates.
all:finish_duplicates.
(* NOTE(review): the script stops here without Qed/Abort — the
   remaining deduplicated goals still have to be proved; presumably the
   original answer was truncated. *)
我们的想法是:您首先创建一个 evar,用来保存各个独特目标的解。然后进行案例分析。接着逐一处理这些子目标:要么用来自该 evar 的新投影来解决它们,要么——如果发现当前目标已经有解——直接复用那个解。最后,将 evar 拆分成多个(去重后的)目标。此外还有一些样板代码,用于 revert 那些在创建 evar 时尚不存在的假设(以满足良类型项的变量作用域要求)、记住哪些假设原本来自上下文,并在最后把它们重新引入上下文。