Type, Zero, One and Star
In [1]:
import $cp.bin.`provingground-core-jvm-d7193b6a8f.fat.jar`
import provingground._, interface._, HoTT._, learning._
// Use the fansi show handlers so that terms and types are pretty-printed in output cells.
repl.pprinter() = {
  val p = repl.pprinter()
  p.copy(
    additionalHandlers = p.additionalHandlers.orElse {
      translation.FansiShow.fansiHandler
    }
  )
}
Out[1]:
In [2]:
// Initial state: Unit, Zero and Star as terms; the universe, Unit and Zero as types.
val terms = FiniteDistribution.unif[Term](Unit, Zero, Star)
val typs = FiniteDistribution.unif[Typ[Term]](Type, Unit, Zero)
val ts = TermState(terms, typs)
// A bare state with no terms and only the universe as a type, used below to generate statements.
val ts0 = TermState(FiniteDistribution(), FiniteDistribution.unif(Type))
Out[2]:
In [3]:
import monix.execution.Scheduler.Implicits.global
// Local provers for the two states, with sharpened cutoffs for a deeper search.
val lp = LocalProver(ts).sharpen(10)
val lp0 = LocalProver(ts0).sharpen(10)
Out[3]:
In [4]:
// Statements generated from the universe whose truth is not yet settled, ordered by entropy.
val unknownsT = lp0.unknownStatements.map(_.entropyVec.map(_.elem)).memoize
val unF = unknownsT.runToFuture
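
Since unF is an ordinary (monix) future, a callback can be attached to report how many unknown statements were produced; a minimal sketch, using only the scheduler already imported above as the execution context:

    // Print the number of unknown statements once the task completes.
    unF.foreach { statements =>
      println(s"Generated ${statements.size} unknown statements")
    }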
In [5]:
// Brings goalChomper, successes and currentGoal (used below) into scope.
import StrategicProvers._
Out[5]:
In [6]:
import almond.display._
// A live Markdown display that will be updated with goal-chomping results.
val chompView = Markdown("## Results from Goal chomping\n")
Out[6]:
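
The same display handle can be refreshed in place at any point; a sketch of an intermediate status update, using only withContent and update exactly as in the final report further down:

    // Replace the contents of the display cell without creating a new one.
    chompView.withContent("## Results from Goal chomping\n\nChomping in progress...").update()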
In [7]:
// Chomp the unknown statements as goals, one after another, using the main prover lp.
val chT = unknownsT.flatMap(typs => goalChomper(lp, typs)).memoize
Out[7]:
In [8]:
// Start goal chomping; chT is memoized, so the computation runs only once.
val chF = chT.runToFuture
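
While the chomper runs, the progress trackers imported from StrategicProvers can be polled; a small sketch, assuming currentGoal and successes are the mutable progress variables inspected in the cells below:

    // A one-line progress report built from the shared trackers.
    def progressReport(): String =
      s"current goal: $currentGoal, proved so far: ${successes.size}"

    println(progressReport())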
In [27]:
// Inspect progress: the goal currently being attempted, the number of successes so far,
// the total number of unknown statements, and the successes in reverse order.
currentGoal
successes.size
unF.map(_.size)
successes.reverse
In [21]:
// Once chomping completes, replace the Markdown view with a summary of how many goals were chomped.
chF.foreach(s => chompView.withContent(s"## Goal chomping done\n\n * chomped: ${successes.size}").update())
In [29]:
chF.value
Out[29]:
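
chF.value is an Option[Try[...]]: None while the task is still running, and a Some once it has finished. A sketch of turning that into a readable status line:

    import scala.util.{Success, Failure}

    chF.value match {
      case None                   => println("goal chomping still running")
      case Some(Success(_))       => println(s"goal chomping finished; chomped: ${successes.size}")
      case Some(Failure(err))     => println(s"goal chomping failed: $err")
    }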
In [30]:
// Peek at the first element of the third component of the chomper's result, if any.
chF.map(_._3.headOption)