Backward reasoning

We test backward reasoning for the statement $e_l = e_r$ in a monoid with a left identity $e_l$ and a right identity $e_r$.
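For orientation, the proof we hope the generators will (re)discover chains the two identity axioms via symmetry and transitivity: the right-identity axiom at $a = e_l$ gives $e_l * e_r = e_l$, the left-identity axiom at $a = e_r$ gives $e_l * e_r = e_r$, and applying symmetry to the first and transitivity to the pair yields $e_l = e_l * e_r = e_r$.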


In [1]:
import $ivy.`io.github.siddhartha-gadgil::provingground-core-jvm:0.1.1-SNAPSHOT`


Out[1]:
import $ivy.$                                                                   

In [2]:
import provingground._ , interface._, HoTT._
import learning._
import library._, MonoidSimple._


Out[2]:
import provingground._ , interface._, HoTT._

import learning._

import library._, MonoidSimple._

In [3]:
repl.pprinter.bind(translation.FansiShow.fansiPrint)

In [4]:
// lambda and pi-type generation weights are set to zero, so no new lambdas or pi-types are generated
val tg = TermGenParams(lmW=0, piW=0)


Out[4]:
tg: TermGenParams = TermGenParams(
  0.1,
  0.1,
  0.1,
  0.0,
  0.0,
  0.05,
  0.05,
  0.05,
  0.0,
  0.0,
  0.0,
  0.0,
  0.3,
  0.7,
  0.5,
  0.0,
  0.0
)

In [5]:
// initial state: the generators and axioms of the monoid (dist1 from MonoidSimple), together with the distribution of their types
val ts = TermState(dist1, dist1.map(_.typ))


Out[5]:
ts: TermState = TermState(
  FiniteDistribution(
    Vector(
      Weighted(e_l, 0.047619047619047616),
      Weighted(e_r, 0.047619047619047616),
      Weighted(mul, 0.047619047619047616),
      Weighted(eqM, 0.047619047619047616),
      Weighted(axiom_{eqM(a)(a)}, 0.047619047619047616),
      Weighted(axiom_{(eqM(a)(b) \to eqM(b)(a))}, 0.047619047619047616),
      Weighted(axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))}, 0.047619047619047616),
      Weighted(axiom_{eqM(mul(e_l)(a))(a)}, 0.047619047619047616),
      Weighted(axiom_{eqM(mul(a)(e_r))(a)}, 0.047619047619047616),
      Weighted(eqM, 0.2857142857142857),
      Weighted(mul, 0.2857142857142857)
    )
  ),
  FiniteDistribution(
    Vector(
      Weighted(M, 0.047619047619047616),
      Weighted(M, 0.047619047619047616),
      Weighted((M → (M → M)), 0.047619047619047616),
      Weighted((M → (M → 𝒰 )), 0.047619047619047616),
      Weighted(∏(a : M){ eqM(a)(a) }, 0.047619047619047616),
      Weighted(∏(a : M){ ∏(b : M){ (eqM(a)(b) → eqM(b)(a)) } }, 0.047619047619047616),
      Weighted(
        ∏(a : M){ ∏(b : M){ ∏(c : M){ (eqM(a)(b) → (eqM(b)(c) → eqM(a)(c))) } } },
        0.047619047619047616
      ),
      Weighted(∏(a : M){ eqM(mul(e_l)(a))(a) }, 0.047619047619047616),
      Weighted(∏(a : M){ eqM(mul(a)(e_r))(a) }, 0.047619047619047616),
      Weighted((M → (M → 𝒰 )), 0.2857142857142857),
      Weighted((M → (M → M)), 0.2857142857142857)
    )
  ),
  Vector(),
  FiniteDistribution(Vector()),
  FiniteDistribution(Vector()),
  Empty
)

In [6]:
val mfd = tg.monixFD


Out[6]:
mfd: MonixFiniteDistribution[TermState, Term] = MonixFiniteDistribution(
  Cons(
    BaseCons(
      BasePi(
        provingground.learning.RandomVarFamily$$Lambda$3163/1607003799@23268bec,
        FuncsWithDomain
      ),
      0.65,
      BaseCons(
        BasePi(
          provingground.learning.GeneratorNode$$Lambda$3166/2054782757@13eebd15,
          FuncsWithDomain
        ),
        0.1,
        BaseCons(
          BasePi(
            provingground.learning.GeneratorNode$$Lambda$3166/2054782757@2932ab36,
            FuncsWithDomain
          ),
          0.1,
          BaseCons(
            BasePi(
              provingground.learning.GeneratorNode$$Lambda$3166/2054782757@7663841c,
              FuncsWithDomain
            ),
            0.1,
            BaseCons(
              BasePi(
                provingground.learning.TermGeneratorNodes$$Lambda$3150/740688104@601a1c58,
                FuncsWithDomain
              ),
              0.0,
              Target(FuncsWithDomain)
            )
          )
        )
      )
    ),
    Cons(
      BaseCons(
        Map(
          provingground.learning.TermRandomVars$TargetTyps$$$Lambda$3169/1164841344@2b860dcf,
          Goals,
          TargetTyps
        ),
        0.7,
        BaseCons(
          Map(
            provingground.learning.TermRandomVars$TargetTyps$$$Lambda$3170/36866988@69e5925a,
...

In [7]:
// backward reasoning node: a task for the distribution of terms obtained by targeting the codomain eqM(e_l)(e_r), with cutoff 0.0001
val tgT = mfd.nodeDist(ts)(tg.Gen.codomainNode(eqM(l)(r)), 0.0001)


Out[7]:
tgT: monix.eval.Task[FiniteDistribution[Term]] = Map(
  FlatMap(Async(<function2>, true, true, true), monix.eval.Task$$Lambda$3376/986006797@58d89df7),
  provingground.learning.MonixFiniteDistribution$$Lambda$3377/701679835@f0e2552,
  0
)

The above is backward reasoning for $e_l = e_r$. Inspecting the resulting distribution (computed in the next cell), we see that

  • the weight is concentrated on two kinds of terms, corresponding to the use of symmetry and of transitivity (a small helper for tabulating these is sketched after this list);
  • the symmetry step introduces no additional parameter, i.e., we are looking for exactly a proof of $e_r = e_l$;
  • the transitivity step introduces exactly one parameter, so for some $x : M$ we look for proofs of $e_l = x$ and $x = e_r$.
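To make the first point concrete, one can group the generated terms by their types and sum the weights. The helper below is only a hedged sketch: it assumes the pmf vector of Weighted(elem, weight) entries that the printed FiniteDistribution values display, and it is meant to be run after tgD is computed in the next cell.

// hedged sketch: total weight per type, heaviest first, so the two dominant
// backward steps (symmetry and transitivity) stand out
def typeWeights(fd: FiniteDistribution[Term]) =
  fd.pmf
    .groupBy(_.elem.typ)
    .map { case (typ, ws) => typ -> ws.map(_.weight).sum }
    .toVector
    .sortBy { case (_, w) => -w }

// intended usage, once tgD is available: typeWeights(tgD).take(5).foreach(println)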

In [8]:
import monix.execution.Scheduler.Implicits.global
val tgD = tgT.runSyncUnsafe()


Out[8]:
import monix.execution.Scheduler.Implicits.global

tgD: FiniteDistribution[Term] = FiniteDistribution(
  Vector(
    Weighted(
      ($drde : eqM(e_r)(e_l)) ↦ axiom_{(eqM(a)(b) \to eqM(b)(a))}(e_r)(e_l)($drde),
      0.28319388018866914
    ),
    Weighted(
      ($drhb : eqM(e_r)(e_l)) ↦ axiom_{(eqM(a)(b) \to eqM(b)(a))}(e_r)(e_l)($drhb),
      0.006165815103151755
    ),
    Weighted(
      (_ : eqM($driz)(e_r)) ↦ (_ : eqM(e_l)($driz)) ↦ ($driz : M) ↦ axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))}(e_l)($driz)(e_r)($drjb)($drjc),
      0.28319388018866914
    ),
    Weighted(
      ($dsel : eqM(mul(e_r)(e_l))(e_r)) ↦ ($dsek : eqM(e_l)(mul(e_r)(e_l))) ↦ axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))}(e_l)(mul(e_r)(e_l))(e_r)($dsek)($dsel),
      0.0018918021974113543
    ),
    Weighted(
      (_ : eqM($dstq)(e_r)) ↦ (_ : eqM(e_l)($dstq)) ↦ ($dstq : M) ↦ axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))}(e_l)($dstq)(e_r)($dsts)($dstt),
      0.0406269447731658
    ),
    Weighted(
      ($dtpi : eqM(e_r)(e_r)) ↦ ($dtph : eqM(e_l)(e_r)) ↦ axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))}(e_l)(e_r)(e_r)($dtph)($dtpi),
      0.006165815103151755
    ),
    Weighted(axiom_{eqM(a)(a)}(e_r), 0.28319388018866914),
    Weighted(
      ($duei : eqM(mul(e_l)(e_l))(e_r)) ↦ ($dueh : eqM(e_l)(mul(e_l)(e_l))) ↦ axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))}(e_l)(mul(e_l)(e_l))(e_r)($dueh)($duei),
      0.0018918021974113543
    ),
    Weighted(
      (_ : eqM(e_l)(e_l)) ↦ axiom_{(eqM(a)(b) \to eqM(b)(a))}(e_r)(e_r)($dury),
      0.0406269447731658
    ),
    Weighted(
      ($duvb : eqM(mul(e_l)(e_r))(e_r)) ↦ ($duva : eqM(e_l)(mul(e_l)(e_r))) ↦ axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))}(e_l)(mul(e_l)(e_r))(e_r)($duva)($duvb),
      0.0018918021974113543
    ),
    Weighted(
      ($dvij : eqM(e_l)(e_r)) ↦ ($dvii : eqM(e_l)(e_l)) ↦ axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))}(e_l)(e_l)(e_r)($dvii)($dvij),
      0.006165815103151755
...

In [9]:
val tgT1 = mfd.nodeDist(ts)(tg.Gen.codomainNode(eqM(op(l)(r))(r)), 0.0001)


Out[9]:
tgT1: monix.eval.Task[FiniteDistribution[Term]] = Map(
  FlatMap(Async(<function2>, true, true, true), monix.eval.Task$$Lambda$3376/986006797@4fb89073),
  provingground.learning.MonixFiniteDistribution$$Lambda$3377/701679835@1d9a1da8,
  0
)

We next target $e_l * e_r = e_r$. The main thing to note is that we get a proof straightaway, with weight about $0.19$ in the output below: the term axiom_{eqM(mul(e_l)(a))(a)}(e_r), i.e., the left-identity axiom instantiated at $a = e_r$, whose type is exactly the goal. We also get the symmetry and transitivity steps as before.


In [10]:
val tgD1 = tgT1.runSyncUnsafe()


Out[10]:
tgD1: FiniteDistribution[Term] = FiniteDistribution(
  Vector(
    Weighted(
      ($iwmn : eqM(e_r)(mul(e_l)(e_r))) ↦ axiom_{(eqM(a)(b) \to eqM(b)(a))}(e_r)(mul(e_l)(e_r))($iwmn),
      0.19042831519470402
    ),
    Weighted(
      (_ : eqM(mul(e_l)(e_r))(mul(e_l)(e_r))) ↦ axiom_{(eqM(a)(b) \to eqM(b)(a))}(e_r)(e_r)($iwrm),
      0.008328583899404744
    ),
    Weighted(
      (_ : eqM($iwvw)(e_r)) ↦ (_ : eqM(mul(e_l)(e_r))($iwvw)) ↦ ($iwvw : M) ↦ axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))}(mul(e_l)(e_r))($iwvw)(e_r)($iwvy)($iwvz),
      0.19042831519470402
    ),
    Weighted(axiom_{eqM(mul(e_l)(a))(a)}(e_r), 0.19042831519470402),
    Weighted(
      ($iyda : eqM(e_r)(mul(e_l)(e_r))) ↦ axiom_{(eqM(a)(b) \to eqM(b)(a))}(e_r)(mul(e_l)(e_r))($iyda),
      0.001272106250653001
    ),
    Weighted(
      ($iyfx : eqM(e_l)(e_r)) ↦ axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))}(mul(e_l)(e_r))(e_l)(e_r)(axiom_{eqM(mul(a)(e_r))(a)}(e_l))($iyfx),
      0.0013051685078530825
    ),
    Weighted(axiom_{eqM(mul(a)(e_r))(a)}(e_r), 0.19042831519470402),
    Weighted(axiom_{eqM(a)(a)}(e_r), 0.19042831519470402),
    Weighted(
      ($iysy : eqM(e_r)(e_r)) ↦ axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))}(mul(e_l)(e_r))(e_r)(e_r)(axiom_{eqM(mul(e_l)(a))(a)}(e_r))($iysy),
      0.0013051685078530825
    ),
    Weighted(
      (_ : eqM($izbk)(e_r)) ↦ (_ : eqM(mul(e_l)(e_r))($izbk)) ↦ ($izbk : M) ↦ axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))}(mul(e_l)(e_r))($izbk)(e_r)($izbm)($izbn),
      0.008328583899404744
    ),
    Weighted(
      ($jarh : eqM(e_r)(mul(e_l)(e_r))) ↦ axiom_{(eqM(a)(b) \to eqM(b)(a))}(e_r)(mul(e_l)(e_r))($jarh),
      0.02731881296131129
    )
  )
)

In [11]:
ts.terms


Out[11]:
res10: FiniteDistribution[Term] = FiniteDistribution(
  Vector(
    Weighted(e_l, 0.047619047619047616),
    Weighted(e_r, 0.047619047619047616),
    Weighted(mul, 0.047619047619047616),
    Weighted(eqM, 0.047619047619047616),
    Weighted(axiom_{eqM(a)(a)}, 0.047619047619047616),
    Weighted(axiom_{(eqM(a)(b) \to eqM(b)(a))}, 0.047619047619047616),
    Weighted(axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))}, 0.047619047619047616),
    Weighted(axiom_{eqM(mul(e_l)(a))(a)}, 0.047619047619047616),
    Weighted(axiom_{eqM(mul(a)(e_r))(a)}, 0.047619047619047616),
    Weighted(eqM, 0.2857142857142857),
    Weighted(mul, 0.2857142857142857)
  )
)

In [12]:
tgD1.filter(_.typ == eqM(op(l)(r))(r))


Out[12]:
res11: FiniteDistribution[Term] = FiniteDistribution(
  Vector(Weighted(axiom_{eqM(mul(e_l)(a))(a)}(e_r), 0.19042831519470402))
)

In [13]:
val tgT2 = mfd.nodeDist(ts)(tg.Gen.codomainNode(eqM(r)(op(l)(r))), 0.0001)


Out[13]:
tgT2: monix.eval.Task[FiniteDistribution[Term]] = Map(
  FlatMap(Async(<function2>, true, true, true), monix.eval.Task$$Lambda$3376/986006797@6d00a5da),
  provingground.learning.MonixFiniteDistribution$$Lambda$3377/701679835@708042c7,
  0
)

This targets $e_r = e_l * e_r$ and is essentially the same as above; it just verifies that we get the symmetry step. Chaining it with the previous example gives a proof directly.
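Explicitly, applying the symmetry axiom at the pair $(e_l * e_r, e_r)$ to the direct proof found above gives

axiom_{(eqM(a)(b) \to eqM(b)(a))}(mul(e_l)(e_r))(e_r)(axiom_{eqM(mul(e_l)(a))(a)}(e_r)) : eqM(e_r)(mul(e_l)(e_r)),

which is exactly the term produced by the folded target node in cell [16] below.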


In [14]:
val tgD2 = tgT2.runSyncUnsafe()


Out[14]:
tgD2: FiniteDistribution[Term] = FiniteDistribution(
  Vector(
    Weighted(
      ($obmg : eqM(mul(e_r)(e_r))(mul(e_l)(e_r))) ↦ ($obmf : eqM(e_r)(mul(e_r)(e_r))) ↦ axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))}(e_r)(mul(e_r)(e_r))(mul(e_l)(e_r))($obmf)($obmg),
      0.0019102078286236943
    ),
    Weighted(
      ($occi : eqM(mul(e_l)(e_r))(e_r)) ↦ axiom_{(eqM(a)(b) \to eqM(b)(a))}(mul(e_l)(e_r))(e_r)($occi),
      0.28594911650643884
    ),
    Weighted(
      ($ochl : eqM(mul(e_l)(e_r))(e_r)) ↦ axiom_{(eqM(a)(b) \to eqM(b)(a))}(mul(e_l)(e_r))(e_r)($ochl),
      0.012506287236483256
    ),
    Weighted(
      (_ : eqM($ocmk)(mul(e_l)(e_r))) ↦ (_ : eqM(e_r)($ocmk)) ↦ ($ocmk : M) ↦ axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))}(e_r)($ocmk)(mul(e_l)(e_r))($ocmm)($ocmn),
      0.28594911650643884
    ),
    Weighted(
      ($odpy : eqM(e_r)(mul(e_l)(e_r))) ↦ axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))}(e_r)(e_r)(mul(e_l)(e_r))(axiom_{eqM(a)(a)}(e_r))($odpy),
      0.0024969299534841087
    ),
    Weighted(axiom_{eqM(a)(a)}(mul(e_l)(e_r)), 0.28594911650643884),
    Weighted(
      ($odyx : eqM(e_r)(mul(e_l)(e_r))) ↦ ($odyw : eqM(e_r)(e_r)) ↦ axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))}(e_r)(e_r)(mul(e_l)(e_r))($odyw)($odyx),
      0.006225803255754271
    ),
    Weighted(
      ($oemr : eqM(mul(e_l)(e_l))(mul(e_l)(e_r))) ↦ ($oemq : eqM(e_r)(mul(e_l)(e_l))) ↦ axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))}(e_r)(mul(e_l)(e_l))(mul(e_l)(e_r))($oemq)($oemr),
      0.0019102078286236943
    ),
    Weighted(
      ($ofdl : eqM(mul(e_r)(e_l))(mul(e_l)(e_r))) ↦ ($ofdk : eqM(e_r)(mul(e_r)(e_l))) ↦ axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))}(e_r)(mul(e_r)(e_l))(mul(e_l)(e_r))($ofdk)($ofdl),
      0.0019102078286236943
    ),
    Weighted(
      (_ : eqM(mul(e_r)(e_l))(e_r)) ↦ axiom_{(eqM(a)(b) \to eqM(b)(a))}(mul(e_l)(e_l))(e_l)($ofup),
      0.012506287236483256
    ),
    Weighted(
      (_ : eqM($ogcn)(mul(e_l)(e_r))) ↦ (_ : eqM(e_r)($ogcn)) ↦ ($ogcn : M) ↦ axiom_{(eqM(a)(b) \to ...

In [15]:
val tgtT2 =  mfd.nodeDist(ts)(tg.Gen.foldedTargetFunctionNode(eqM(r)(op(l)(r))), 0.0001)


Out[15]:
tgtT2: monix.eval.Task[FiniteDistribution[Term]] = FlatMap(
  Map(
    FlatMap(Async(<function2>, true, true, true), monix.eval.Task$$Lambda$3376/986006797@310fc890),
    provingground.learning.MonixFiniteDistribution$$Lambda$3568/1377180820@54d8cd99,
    0
  ),
  provingground.learning.MonixFiniteDistribution$$Lambda$3569/393696762@36008355
)

And indeed, here is a proof of the lemma $e_r = e_l * e_r$.


In [16]:
val tgtD2 = tgtT2.runSyncUnsafe()


Out[16]:
tgtD2: FiniteDistribution[Term] = FiniteDistribution(
  Vector(
    Weighted(
      axiom_{(eqM(a)(b) \to eqM(b)(a))}(mul(e_l)(e_r))(e_r)(axiom_{eqM(mul(e_l)(a))(a)}(e_r)),
      0.2984554037429222
    )
  )
)

Identity type

We test backward reasoning using the standard identity type (equality) in place of the custom equality type eqM. This now works.


In [17]:
import IdentityTyp._
symmTerm


Out[17]:
import IdentityTyp._

res16_1: FuncLike[Typ[Term], FuncLike[Term, FuncLike[Term, Func[Equality[Term], Equality[Term]]]]] = ($xbbx : 𝒰 ) ↦ ($xbce : $xbbx) ↦ ($xbcf : $xbbx) ↦ ($xbcg : $xbce = $xbcf) ↦ induc_{ ($xbom : $xbbx) ↦ ($xbon : $xbbx) ↦ $xbom = $xbon ; ($xbby : $xbbx) ↦ ($xbbz : $xbbx) ↦ (_ : $xbby = $xbbz) ↦ $xbbz = $xbby }(($xbby : $xbbx) ↦ Refl($xbbx,$xbby))($xbcg)

In [18]:
transTerm


Out[18]:
res17: FuncLike[Typ[Term], FuncLike[Term, FuncLike[Term, FuncLike[Term, Func[Equality[Term], Func[Equality[Term], Equality[Term]]]]]]] = ($xbbx : 𝒰 ) ↦ ($xbop : $xbbx) ↦ ($xboq : $xbbx) ↦ ($xbor : $xbbx) ↦ ($xbpy : $xbop = $xboq) ↦ induc_{ ($xfly : $xbbx) ↦ ($xflz : $xbbx) ↦ $xfly = $xflz ; ($xbos : $xbbx) ↦ ($xbot : $xbbx) ↦ (_ : $xbos = $xbot) ↦ ($xbot = $xbor → $xbos = $xbor) }(($xbos : $xbbx) ↦ ($xcqx : $xbos = $xbor) ↦ $xcqx)($xbpy)

In [19]:
symmTerm (M) (l) (r)


Out[19]:
res18: Func[Equality[Term], Equality[Term]] = ($xbcg : e_l = e_r) ↦ induc_{ ($xfmz : M) ↦ ($xfna : M) ↦ $xfmz = $xfna ; ($xbby : M) ↦ ($xbbz : M) ↦ (_ : $xbby = $xbbz) ↦ $xbbz = $xbby }(($xbby : M) ↦ Refl(M,$xbby))($xbcg)

In [20]:
symmTerm(M)(l)(r).typ


Out[20]:
res19: Typ[Func[Equality[Term], Equality[Term]]] = (e_l = e_r → e_r = e_l)

In [21]:
transTerm(M).typ


Out[21]:
res20: Typ[FuncLike[Term, FuncLike[Term, FuncLike[Term, Func[Equality[Term], Func[Equality[Term], Equality[Term]]]]]]] = ∏($xbop : M){ ∏($xboq : M){ ∏($xbor : M){ ($xbop = $xboq → ($xboq = $xbor → $xbop = $xbor)) } } }

In [22]:
val ts0 = TermState(FiniteDistribution.unif(symmTerm, transTerm, l, r), FiniteDistribution.unif(M))


Out[22]:
ts0: TermState = TermState(
  FiniteDistribution(
    Vector(
      Weighted(
        ($xbbx : 𝒰 ) ↦ ($xbce : $xbbx) ↦ ($xbcf : $xbbx) ↦ ($xbcg : $xbce = $xbcf) ↦ induc_{ ($xbom : $xbbx) ↦ ($xbon : $xbbx) ↦ $xbom = $xbon ; ($xbby : $xbbx) ↦ ($xbbz : $xbbx) ↦ (_ : $xbby = $xbbz) ↦ $xbbz = $xbby }(($xbby : $xbbx) ↦ Refl($xbbx,$xbby))($xbcg),
        0.25
      ),
      Weighted(
        ($xbbx : 𝒰 ) ↦ ($xbop : $xbbx) ↦ ($xboq : $xbbx) ↦ ($xbor : $xbbx) ↦ ($xbpy : $xbop = $xboq) ↦ induc_{ ($xfly : $xbbx) ↦ ($xflz : $xbbx) ↦ $xfly = $xflz ; ($xbos : $xbbx) ↦ ($xbot : $xbbx) ↦ (_ : $xbos = $xbot) ↦ ($xbot = $xbor → $xbos = $xbor) }(($xbos : $xbbx) ↦ ($xcqx : $xbos = $xbor) ↦ $xcqx)($xbpy),
        0.25
      ),
      Weighted(e_l, 0.25),
      Weighted(e_r, 0.25)
    )
  ),
  FiniteDistribution(Vector(Weighted(M, 1.0))),
  Vector(),
  FiniteDistribution(Vector()),
  FiniteDistribution(Vector()),
  Empty
)

In [23]:
val tgT0 = mfd.nodeDist(ts0)(tg.Gen.codomainNode(l =:= r), 0.001)


Out[23]:
tgT0: monix.eval.Task[FiniteDistribution[Term]] = Map(
  FlatMap(Async(<function2>, true, true, true), monix.eval.Task$$Lambda$3376/986006797@1c350c7f),
  provingground.learning.MonixFiniteDistribution$$Lambda$3377/701679835@76b2ae92,
  0
)

In [24]:
Unify.targetCodomain(symmTerm, l =:= r)


Out[24]:
res23: Option[Term] = Some(
  ($xbcg : e_r = e_l) ↦ induc_{ ($xgrv : M) ↦ ($xgrw : M) ↦ $xgrv = $xgrw ; ($xbby : M) ↦ ($xbbz : M) ↦ (_ : $xbby = $xbbz) ↦ $xbbz = $xbby }(($xbby : M) ↦ Refl(M,$xbby))($xbcg)
)
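
A natural follow-up, not run above, is the analogous unification for transitivity, and actually running the backward-reasoning task for the identity-type goal. A hedged sketch, assuming only the imports and values already defined in this notebook:

// which specialization of transTerm has codomain e_l = e_r? By analogy with the
// symmetry case above, we expect Some(...) still awaiting a middle term and the
// two chained equalities.
val backTrans = Unify.targetCodomain(transTerm, l =:= r)

// run the backward-reasoning task for the identity-type goal, as was done for eqM
val tgD0 = tgT0.runSyncUnsafe()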