Welcome to VapourSynth in Colab!

Basic usage instructions: run the setup script, and run all the cells in the "processing" section for example output.

For links to instructions, tutorials, and help, see https://github.com/AlphaAtlas/VapourSynthColab

Init


In [0]:
#@title Check GPU
#@markdown Run this to connect to a Colab Instance, and see what GPU Google gave you.

#nvidia-smi csv output: line 0 is the header row, line 1 is the GPU name
gpu = !nvidia-smi --query-gpu=gpu_name --format=csv
print(gpu[1])
print("The Tesla T4 and P100 are fast and support hardware encoding. The K80 and P4 are slower.")
print("Sometimes resetting the instance in the 'runtime' tab will give you a different GPU.")

In [0]:
#@title Setup {display-mode: "form"}
#@markdown Run this to install VapourSynth, VapourSynth plugins and scripts, as well as some example upscaling models.
#NOTE: running this more than once may or may not work. 
#The buggy console output is due to the threaded installing
#Currently TPU support is broken and incomplete, but it isn't particularly useful since it doesn't support opencl anyway 

#Init
import os, sys, shutil, tempfile
import collections
from datetime import datetime, timedelta
import requests
import threading
import ipywidgets as widgets
from IPython import display
import PIL
from google.colab import files
import time
%cd /


#Function defs
#---------------------------------------------------------

#Like shutil.copytree(), but merges into existing directories instead of
#raising on them (Python 3.8+ offers shutil.copytree(dirs_exist_ok=True))
def copytree(src, dst, symlinks=False, ignore=None):
  """Recursively copy the contents of src into dst, merging with anything
  already present.

  Args:
    src: source directory path.
    dst: destination directory path (created if missing).
    symlinks, ignore: forwarded to shutil.copy2/recursive calls, matching
      the shutil.copytree() signature.
  """
  #Ensure the destination itself exists, so callers don't have to pre-create it
  os.makedirs(dst, exist_ok=True)
  for item in os.listdir(src):
    s = os.path.join(src, item)
    d = os.path.join(dst, item)
    if os.path.isdir(s):
      #Recurse with THIS function, not shutil.copytree: the original
      #delegated to shutil.copytree here, which still raised
      #FileExistsError whenever a destination subdirectory already existed.
      copytree(s, d, symlinks, ignore)
    else:
      shutil.copy2(s, d)

#Download and extract the .py scripts from the VapourSynth fatpack
def download_fatpack_scripts():
  """Download VapourSynth helper scripts into /VapourSynthImports.

  Extracts the .py scripts from the portable FatPack release, adds the
  folder to sys.path, pulls a few extra scripts (CUDA helpers only when
  the module-level "accelerator" global is "CUDA"), and downloads an
  example ESRGAN model to /content/4X_Box.pth.
  """
  %cd /
  print("Downloading VS FatPack Scripts...")
  dlurl = r"https://github.com/theChaosCoder/vapoursynth-portable-FATPACK/releases/download/r3/VapourSynth64Portable_2019_11_02.7z"
  with tempfile.TemporaryDirectory() as t:
    dpath = os.path.join(t, "VapourSynth64Portable_2019_11_02.7z")
    os.chdir(t)
    !wget {dlurl}
    %cd /
    !7z x -o{t} {dpath}
    scriptsd = os.path.abspath(os.path.join(t, "VapourSynth64Portable", "Scripts"))
    s = os.path.normpath("VapourSynthImports")
    os.makedirs(s, exist_ok = True)
    copytree(scriptsd, s)
    #Make the downloaded scripts importable from later cells
    sys.path.append(s)
  
  #Get some additional scripts.
  !wget -O /VapourSynthImports/muvsfunc_numpy.py https://raw.githubusercontent.com/WolframRhodium/muvsfunc/master/Collections/muvsfunc_numpy.py
  !wget -O /VapourSynthImports/edi_rpow2.py https://gist.githubusercontent.com/YamashitaRen/020c497524e794779d9c/raw/2a20385e50804f8b24f2a2479e2c0f3c335d4853/edi_rpow2.py
  !wget -O /VapourSynthImports/BMToolkit.py https://raw.githubusercontent.com/IFeelBloated/BlockMatchingToolkit/master/BMToolkit.py
  if accelerator == "CUDA":
    !wget -O /VapourSynthImports/Alpha_CuPy.py https://raw.githubusercontent.com/AlphaAtlas/VapourSynth-Super-Resolution-Helper/master/Scripts/Alpha_CuPy.py
    !wget -O /VapourSynthImports/dpid.cu https://raw.githubusercontent.com/WolframRhodium/muvsfunc/master/Collections/examples/Dpid_cupy/dpid.cu
    !wget -O /VapourSynthImports/bilateral.cu https://raw.githubusercontent.com/WolframRhodium/muvsfunc/master/Collections/examples/BilateralGPU_cupy/bilateral.cu

  #Get an example model (hosted on Google Drive, hence gdown):
  import gdown
  gdown.download(r"https://drive.google.com/uc?id=1KToK9mOz05wgxeMaWj9XFLOE4cnvo40D", "/content/4X_Box.pth", quiet=False)

def getdep1():
  """Install apt-fast, then the base apt packages needed by later steps
  (7z for the FatPack, svn for model pulls, davfs2 for Nextcloud, ninja
  for plugin builds)."""
  %cd /
  #Install apt-fast, for faster installing
  !/bin/bash -c "$(curl -sL https://git.io/vokNn)"
  #Get some basic dependencies
  !apt-fast install -y -q -q subversion davfs2 p7zip-full p7zip-rar ninja-build 

#Get VapourSynth and ImageMagick built just for a colab environment
def getvs():
  """Install prebuilt VapourSynth and ImageMagick .deb packages.

  NOTE(review): artifacts are pulled from bintray; if the downloads fail
  they can be rebuilt from source with the build cells at the bottom of
  the notebook.
  """
  %cd /
  #%cd var/cache/apt/archives
  #Artifacts hosted on bintray. If they fail to install, they can be built from source. 
  !curl -L "https://dl.bintray.com/alphaatlas100/vapoursynth-colab/imagemagick_7.0.9-8-1_amd64.deb" -o /var/cache/apt/archives/imagemagick.deb
  !dpkg -i /var/cache/apt/archives/imagemagick.deb
  !ldconfig /usr/local/lib
  !curl -L "https://dl.bintray.com/alphaatlas100/vapoursynth-colab/vapoursynth_48-1_amd64.deb" -o /var/cache/apt/archives/vapoursynth.deb
  !dpkg -i /var/cache/apt/archives/vapoursynth.deb
  !ldconfig /usr/local/lib
  #%cd /

def getvsplugins():
  %cd /
  #Allow unauthenticated sources
  if not os.path.isfile("/etc/apt/apt.conf.d/99myown"):
    with open("/etc/apt/apt.conf.d/99myown", "w+") as f:
      f.write(r'APT::Get::AllowUnauthenticated "true";')
  sources = "/etc/apt/sources.list"
  #Backup original apt sources file, just in case
  with tempfile.TemporaryDirectory() as t:
    tsources = os.path.join(t, os.path.basename(sources))
    shutil.copy(sources, tsources)
    #Add deb-multimedia repo
    #Because building dozens of VS plugins is not fun, and takes forever
    with open(sources, "a+") as f:
      deb = "deb https://www.deb-multimedia.org sid main non-free\n"
      if not "deb-multimedia" in f.read():
        f.write(deb)

    with open(sources, "a+") as f:
      #Temporarily use Debian unstable for some required dependencies 
      if not "ftp.us.debian.org" in f.read():
        f.write("deb http://ftp.us.debian.org/debian/ sid main\n")
    !add-apt-repository -y ppa:deadsnakes/ppa
    !apt-fast update -oAcquire::AllowInsecureRepositories=true
    !apt-fast install -y --allow-unauthenticated deb-multimedia-keyring
    !apt-fast update  

    #Parse plugins to install
    out = !apt-cache search vapoursynth
    vspackages = ""
    #exclude packages with these strings in the name
    exclude = ["waifu", "wobbly", "editor", "dctfilter", "vapoursynth-dev", "vapoursynth-doc"]
    for line in out:
      p =  line.split(" - ")[0].strip()
      if not any(x in p for x in exclude) and "vapoursynth" in p and p != "vapoursynth":
        vspackages = vspackages + p + " "
    print(vspackages)
    #Install VS plugins and a newer ffmpeg build
    !apt-fast install -y --allow-unauthenticated --no-install-recommends ffmpeg youtube-dl libzimg-dev {vspackages} libfftw3-3 libfftw3-double3 libfftw3-dev libfftw3-bin libfftw3-double3 libfftw3-single3 checkinstall
    #Get a tiny example video
    !youtube-dl -o /content/enhance.webm -f 278 https://www.youtube.com/watch?v=I_8ZH1Ggjk0
    #Restore original sources
    os.remove(sources)
    shutil.copy(tsources, sources)
  #Congrats! Apt may or may not be borked.
  copytree("/usr/lib/x86_64-linux-gnu/vapoursynth", "/usr/local/lib/vapoursynth")
  !ldconfig /usr/local/lib/vapoursynth

#Install vapoursynth python modules
def getpythonstuff():
  """Install the VapourSynth python bindings plus meson and opencv-python
  for Python 3.6 (the interpreter the prebuilt debs target)."""
  %cd /
  !python3.6 -m pip install vapoursynth meson opencv-python

def cudastuff():
    """Install CUDA-dependent python packages and fetch NN model listings.

    Parses "nvcc --version" so the matching mxnet-cuXX wheel is installed,
    installs VSGAN from git, and shallow-checkouts the Super-Resolution-Zoo
    model index via svn (full depth only for the ARAN example model).
    """
    %cd /
    out = !nvcc --version
    #e.g. "release 10.0, V10.0.130" -> "100", matching the mxnet wheel suffix
    cudaver = (str(out).split("Cuda compilation tools, release ")[1].split(", ")[0].replace(".", ""))
    #Note this download sometimes times out
    !python3.6 -m pip install mxnet-cu{cudaver} #cupy-cuda{cudaver}
    !pip install git+https://github.com/AlphaAtlas/VSGAN.git

    #Mxnet stuff
    
    modelurl = "https://github.com/WolframRhodium/Super-Resolution-Zoo/trunk"
    if os.path.isdir("/NeuralNetworks"):
      !svn update --set-depth immediates /NeuralNetworks
      !svn update --set-depth infinity /NeuralNetworks/ARAN
    else:
      !svn checkout --depth immediates {modelurl} /NeuralNetworks

def makesrcd(name):
  """Create /src/<name> and cd into it, for building software from source.

  NOTE(review): exist_ok=False means a re-run raises FileExistsError —
  presumably to fail fast instead of rebuilding into a dirty directory;
  confirm before changing.
  """
  %cd /
  srpath = os.path.abspath(os.path.join("/src", name))
  os.makedirs(srpath, exist_ok = False)
  %cd {srpath}

def mesongit(giturl):
  """Clone a git repo into /src/<name>, then build and install it with
  meson + ninja.

  Args:
    giturl: clone URL, assumed to end in ".git" (the [:-4] strips it to
      derive the project name).
  """
  p = os.path.basename(giturl)[:-4]
  makesrcd(p)
  !git clone {giturl}
  %cd {p}
  !meson build
  !ninja -C build
  !ninja -C build install

#Taken from https://stackoverflow.com/a/31614591
#Allows exceptions to be caught from threads
from threading import Thread

class PropagatingThread(Thread):
    """Thread subclass whose join() re-raises exceptions from the target.

    Plain threading.Thread swallows exceptions raised in the target, so a
    failed installer step would pass silently; this captures the exception
    in run() and re-raises it from join(). Based on
    https://stackoverflow.com/a/31614591
    """
    def run(self):
        self.exc = None
        #Initialize ret so join() can't hit AttributeError when the target
        #raises before the assignment below completes.
        self.ret = None
        try:
            if hasattr(self, '_Thread__target'):
                # Thread uses name mangling prior to Python 3.
                self.ret = self._Thread__target(*self._Thread__args, **self._Thread__kwargs)
            else:
                self.ret = self._target(*self._args, **self._kwargs)
        except BaseException as e:
            self.exc = e

    def join(self, timeout=None):
        """Wait for the thread; re-raise its exception or return its result.

        Accepts the base class' timeout argument (the original signature
        silently broke Thread.join(timeout)).
        """
        super(PropagatingThread, self).join(timeout)
        if self.exc:
            raise self.exc
        return self.ret


#Interpolation experiment
#%cd /
#os.makedirs("/videotools")
#%cd /videotools
#!git clone https://github.com/sniklaus/pytorch-sepconv.git
#%cd /

#Function for testing vapoursynth scripts
#Takes the path of the script, and a boolean for generating a test frame.

#-----------------------------------------------------------

#Init functions are threaded for speed
#"PropagatingThread" class is used to return exceptions from threads, otherwise they fail silently

t1 = PropagatingThread(target = getdep1)
t1.start()
print("apt init thread started")

t2 = PropagatingThread(target = download_fatpack_scripts)
t2.start()
print("VS script downloader thread started.")

#Get rid of memory usage log spam from MXnet
os.environ["TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD"] = "107374182400"

#Check for an accelerator
accelerator = None
gpu = None
if 'COLAB_TPU_ADDR' in os.environ:
  #WIP
  raise Exception("TPUs are (currently) not supported! Please use a GPU or CPU instance.")
else:
  #Check for Nvidia GPU, and identify it 
  out = !command -v nvidia-smi
  if out != []:
    out = !nvidia-smi
    for l in out:
      if "Driver Version" in l:
        accelerator = "CUDA"
        print("Nvidia GPU detected:")
        gpu = !nvidia-smi --query-gpu=gpu_name --format=csv
        gpu = gpu[1]
        #print("Tesla K80 < Tesla T4 < Tesla P100")
        break
if accelerator == None:
  print("Warning: No Accelerator Detected!")

t1.join()
print("Apt init thread done.")

t1 = PropagatingThread(target = getvs)
t1.start()
print("Vapoursynth/Imagemagick downloader thread started.")
t1.join()
print("Vapoursynth/Imagemagick installed")

t3 = PropagatingThread(target = getpythonstuff)
t3.start()
print("Pip thread started")

t1 = PropagatingThread(target = getvsplugins)
t1.start()
print("VS plugin downloader thread started.")

t3.join()
print("pip thread done")

if accelerator == "TPU":
  #WIP!
  pass

elif accelerator == "CUDA":
  t3 = PropagatingThread(target = cudastuff)
  t3.start()
  print("CUDA pip thread started.")
else:
  pass

t2.join()
print("VS script downloader thread done.")

t3.join()
print("CUDA pip thread done.")

t1.join()
print("VS plugin thread done.")



#Build some more plugins(s)
#TODO: Build without changing working directory, or try the multiprocessing module, so building can run asynchronously 
print("Building additional plugins")
#These two are in the apt exclusion list above, so build them from git
mesongit(r"https://github.com/HomeOfVapourSynthEvolution/VapourSynth-DCTFilter.git")
mesongit(r"https://github.com/HomeOfVapourSynthEvolution/VapourSynth-TTempSmooth.git")

#Placeholder; set for real in the "Mount Google Drive" cell
googpath = None
%cd /

Clear_Console_Output_When_Done = True #@param {type:"boolean"}
if Clear_Console_Output_When_Done:
  display.clear_output()
#if gpu is not None:
#  print(gpu[1])
#  print("A Tesla T4 or P100 is significantly faster than a K80")
#  print("And the K80 doesn't support hardware encoding.")

In [0]:
#@title Mount Google Drive
#@markdown Highly recommended!

import os
%cd /

#Check if Google Drive is mounted, and mount it if it's not.
googpath = os.path.abspath(os.path.join("gdrive", "My Drive"))
if not os.path.isdir(googpath):
  from google.colab import drive
  drive.mount('/gdrive', force_remount=True)

In [0]:
#@title Mount a Nextcloud Drive

import os
nextcloud = "/nextcloud"
os.makedirs(nextcloud, exist_ok=True)
Nextcloud_URL = "https://us.hostiso.cloud/remote.php/webdav/" #@param {type:"string"}

%cd /
#NOTE(review): this replaces the entire /etc/fstab with the single davfs
#entry below, discarding any existing entries — acceptable on a throwaway
#Colab VM, but worth confirming.
if os.path.isfile("/etc/fstab"):
  os.remove("/etc/fstab")
with open("/etc/fstab" , "a") as f:
  f.write(Nextcloud_URL + " " + nextcloud + " davfs user,rw,auto 0 0")
!mount {nextcloud}

Processing


In [0]:
%%writefile /content/autogenerated.vpy

#This is the Vapoursynth Script!
#Running this cell will write the code in this cell to disk, for VSPipe to read.
#Later cells will check to see if it executes.
#Edit it just like a regular python VS script.
#Search for functions and function reference in http://vsdb.top/, or browse the "VapourSynthImports" folder. 

#Import functions
import sys, os, cv2
sys.path.append('/VapourSynthImports')
import vapoursynth as vs
import vsgan as VSGAN
import mvsfunc as mvf
#import muvsfunc as muf
#import fvsfunc as fvf
import havsfunc as haf
import Alpha_CuPy as ape
import muvsfunc_numpy as mufnp
#import BMToolkit as bm
import G41Fun as G41
#import vsutil as util
#import edi_rpow2 as edi
#import kagefunc as kage
#import lostfunc as lost
#import vsTAAmbk as taa
#import xvs as xvs
from vapoursynth import core

#Set RAM cache size, in MB
core.max_cache_size = 10500

#Get Video(s) or Image(s). ffms2 (ffmpeg) or imwri (imagemagick) will read just about anything.
#Lsmash sometimes works if ffms2 fails, d2v reads mpeg2 files
clip = core.ffms2.Source(r"/content/enhance.webm")
#clip = core.lsmas.LWLibavSource("/tmp/%d.png")
#clip = core.imwri.Read("testimage.tiff")

#Store source for previewing
src = clip

#Convert to 16 bit YUV for preprocessing
#clip = core.resize.Spline36(clip, format = vs.YUV444P16)

#Deinterlace
#clip = G41.QTGMC(clip, Preset='Medium')

#Mild deblocking
#clip = fvf.AutoDeblock(clip)

#Convert to floating point RGB
clip = mvf.ToRGB(clip, depth = 32)

#Spatio-temporal GPU denoiser. https://github.com/Khanattila/KNLMeansCL/wiki/Filter-description
clip = core.knlm.KNLMeansCL(clip, a = 8, d = 4, h = 1.4)


preupscale = clip
#Run ESRGAN model. See https://upscale.wiki/wiki/Model_Database
vsgan_device = VSGAN.VSGAN()
vsgan_device.load_model(model=r"/content/4X_Box.pth", scale=4)
clip = vsgan_device.run(clip=clip, chunk = False,  pad = 16)

#Lighter denoise pass after upscaling, to clean up model artifacts
clip = core.knlm.KNLMeansCL(clip, a = 7, d = 3, h = 1.4)



#Run MXNet model. See the "MXNet" cell.
#Tensorflow models are also supported!
#sr_args = dict(model_filename=r'/NeuralNetworks/ARAN/aran_c0_s1_x4', up_scale=4, device_id=0, block_w=256, block_h=128, is_rgb_model=True, pad=None, crop=None, pre_upscale=False)
#clip = mufnp.super_resolution(clip, **sr_args)

#HQ downscale on the GPU with dpid
#clip = ape.GPU_Downscale(clip, width = 3840, height = 2160)

#Convert back to YUV 444 format/Rec 709 colorspace
clip = core.resize.Spline36(clip, format = vs.YUV444P16, matrix_s = "709")

#Strong temporal denoiser and stabilizer with the LR as a motion reference clip, for stabilizing.
prefilter = core.resize.Spline36(preupscale, format = clip.format, width = clip.width, height = clip.height, matrix_s = "709")
clip = G41.SMDegrain(clip, tr=3, RefineMotion=True, pel = 1, prefilter = prefilter)

#Another CPU denoiser/stabilizer. "very high" is very slow.
#clip = haf.MCTemporalDenoise(clip, settings = "very high", useTTmpSm = True, maxr=4, stabilize = True)

#Stabilized Anti Aliasing, with some GPU acceleration
#clip = taa.TAAmbk(clip, opencl=True, stabilize = 3)

#Example sharpeners that work well on high-res images
#Masks or mvf.limitfilter are good ways to keep artifacts in check
#clip = core.warp.AWarpSharp2(clip)
#clip = G41.NonlinUSM(clip, z=3, sstr=0.28, rad=9, power=1)

#High quality, strong debanding
#clip = fvf.GradFun3(clip, smode = 2)

#Convert back to 8 bit YUV420 for output. 
clip = core.resize.Spline36(clip, format = vs.YUV420P8, matrix_s = "709", dither_type = "error_diffusion")

#Interpolate to double the source framerate
#super = core.mv.Super(inter)
#backward_vectors = core.mv.Analyse(super, isb = True,  overlap=4, search = 3)
#forward_vectors = core.mv.Analyse(super, isb = False, overlap=4, search = 3)
#inter = core.mv.FlowFPS(inter, super, backward_vectors, forward_vectors, num=0, den=0)

#Stack the source on top of the processed clip for comparison
src = core.resize.Point(src, width = clip.width, height = clip.height, format = clip.format)
#clip = core.std.StackVertical([clip, src])
#Alternatively, interleave the source and slow down the framerate for easy comparison.
clip = core.std.Interleave([clip, src])
clip = core.std.AssumeFPS(clip, fpsnum = 2)

#clip = core.std.SelectEvery(clip=clip, cycle=48, offsets=[0,1])

clip.set_output()

In [0]:
#@title Preview Options
#@markdown Run this cell to check the .vpy script, and set preview options. 
#@markdown * Software encoding is relatively slow on colab's single CPU core, but returns a smaller video.
#@markdown * Hardware encoding doesn't work on older GPUs or a TPU, but can be faster.
#@markdown * Sometimes video previews don't work. Chrome seems more reliable than Firefox, but its video player doesn't support scrubbing. Alternatively, you can download the preview in the "/content" folder with the Colab UI.
#@markdown * HEVC support in browsers is iffy.
#@markdown * PNG previews are more reliable, but less optimal.  
#@markdown * In video previews, you can interleave the source and processed clips and change the framerate for easy comparisons. 
#@markdown ***

#TODO: Make vpy file path editable
vpyscript = "/content/autogenerated.vpy"
#@markdown Use hardware encoding.
Hardware_Encoding = True #@param {type:"boolean"}
#@markdown Encode preview as lossless or high quality lossy video
Lossless = False #@param {type:"boolean"}
#@markdown Use HEVC instead of AVC for preview. Experimental.
HEVC = False #@param {type:"boolean"}
#@markdown Generate a single PNG instead of a video.
Write_PNG = False #@param {type:"boolean"}
#@markdown Don't display any video preview, just write it to /content
Display_Video = False #@param {type:"boolean"}
#@markdown Number of preview frames to generate
preview_frames =  120 #@param {type:"integer"}
#Check script with test frame (for debugging)
Test_Frame = False 

from IPython.display import clear_output
import ipywidgets as widgets
from pprint import pprint



def checkscript(vpyfile, checkoutput):
  """Test-run and parse a .vpy script with vspipe.

  Args:
    vpyfile: path to the VapourSynth script.
    checkoutput: if True, also render frame 0 to verify the script really
      produces output (slower, but catches runtime errors early).

  Returns:
    (clipinfo, rawinfo, quotepath): dict of clip properties parsed from
    "vspipe -i", the raw vspipe output lines, and the quoted script path.
  """
  
  #Clear the preview cache folder, as the script could have changed
  
  quotepath = r'"' + vpyfile + r'"'
  print("Testing script...")
  if checkoutput:
  #See if the script will really output a frame
    test = !vspipe -y -s 0 -e 0 {quotepath} .
  #Parse the script, and return information about it. 
  rawinfo = !vspipe -i {quotepath} -
  #Store clip properties as a dict
  #NOTE(review): eval-based parsing of vspipe's "Key: Value" output is
  #fragile, and eval on tool output is risky in general; a str.split or
  #regex parser would be safer. Kept as-is to preserve behavior.
  clipinfo = eval(r"{" + str(rawinfo)[1:].replace(r"\n", r"','").replace(r": ", r"': '")[:-1] + r"}")
  !clear
  if not isinstance(clipinfo, dict):
    print(rawinfo)
    raise Exception("Error parsing VapourSynth script!")
  #print("Script output properties: ")
  #!echo {clipinfo}
  return clipinfo, rawinfo, quotepath

#Make a preview button, and a frame slider
#Note that the slider won't appear with single frame scripts
%cd /
#display.clear_output()
!clear
clipinfo, rawinfo, quotepath = checkscript(vpyscript, Test_Frame)
frameslider = None
#Only draw a slider when the clip has more than one frame
drawslider = int(clipinfo["Frames"]) > 1
if drawslider:
  frameslider = widgets.IntSlider(value=0, max=(int(clipinfo["Frames"]) - 1), layout=widgets.Layout(width='100%', height='150%'))
else:
  preview_frames = 1
fv = None


if not(preview_frames > 0 and preview_frames <= int(clipinfo["Frames"])):
  raise Exception("preview_frames must be a valid integer")
if drawslider:
  fv = int(frameslider.value)
else:
  fv = 0

encstr = ""
#The preview lives in the Jupyter nbextensions folder so the HTML player
#in the next cell can reach it via /nbextensions/preview.mp4
previewfile = r"/usr/local/share/jupyter/nbextensions/preview.mp4"
if os.path.isfile(previewfile):
  os.remove(previewfile)
ev = min((int(fv + preview_frames - 1), int(clipinfo["Frames"])- 1))
#Pick an ffmpeg encoder string for each (Hardware, HEVC, Lossless) combo
enctup = (Hardware_Encoding, HEVC, Lossless) 
if enctup == (True, True, True):
  encstr = r"-c:v hevc_nvenc -profile main10 -preset lossless -spatial_aq:v 1 -aq-strength 15 "
elif enctup == (True, True, False):
  encstr = r"-c:v hevc_nvenc -pix_fmt yuv420p10le -preset:v medium -profile:v main10 -spatial_aq:v 1 -aq-strength 15 -rc:v constqp -qp:v 9"
elif enctup == (True, False, True):
  encstr = r"-c:v h264_nvenc -preset lossless -profile high444p -spatial-aq 1 -aq-strength 15"
elif enctup == (False, True, True):
  encstr = r"-c:v libx265 -pix_fmt yuv420p10le -preset slow -x265-params lossless=1"
elif enctup == (True, False, False):
  encstr = r"-c:v h264_nvenc -pix_fmt yuv420p -preset:v medium -rc:v constqp -qp:v 10 -spatial-aq 1 -aq-strength 15"
elif enctup == (False, False, True):
  encstr = r"-c:v libx264 -preset veryslow -crf 0"
elif enctup == (False, True, False):
  encstr = r"-c:v libx265 -pix_fmt yuv420p10le -preset slow -crf 9"
elif enctup == (False, False, False):
  encstr = r"-c:v libx264 -pix_fmt yuv420p -preset veryslow -crf 9"
else:
  raise Exception("Invalid parameters!")
clear_output()
print(*rawinfo, sep = ' ')
print("Select the frame(s) you want to preview with the slider and 'preview frames', then run the next cell.")
display.display(frameslider)

In [0]:
#@title Generate Preview

import os, time


previewdisplay = r"""
<video controls autoplay>
  <source src="/nbextensions/preview.mp4" type='video/mp4;"'>
  Your browser does not support the video tag.
</video>
"""
previewpng = "/content/preview" + str(frameslider.value) + ".png"
if os.path.isfile(previewfile):
  os.remove(previewfile)
if os.path.isfile(previewpng):
  os.remove(previewpng)
frames = str(clipinfo["Frames"])
end = min(frameslider.value + preview_frames - 1, int(clipinfo["Frames"]) - 1)
if Write_PNG:
  !vspipe -y -s {frameslider.value} -e {frameslider.value} /content/autogenerated.vpy - | ffmpeg-y -hide_banner -loglevel warning -i pipe: {previewpng} 
  if os.path.isfile(previewpng):
    import PIL
    display.display(PIL.Image.open(previewpng, mode='r'))
  else:
    raise Exception("Error generating preview!")
else:
  out = !vspipe --progress -y -s {frameslider.value} -e {end} /content/autogenerated.vpy - | ffmpeg -y -hide_banner -progress pipe:1 -loglevel warning -i pipe: {encstr} {previewfile} | grep "fps"
  if os.path.isfile(previewfile):
    if os.path.isfile("/content/preview.mp4"):
      os.remove("/content/preview.mp4")
    !ln {previewfile} "/content/preview.mp4"
    clear_output()
    for temp in out:
      if "Output" in temp:
        print(temp)
    if Display_Video:
      display.display(display.HTML(previewdisplay))
  else:
    raise Exception("Error generating preview!")

Scratch Space



In [0]:
#Do stuff here

#Example ffmpeg script:

!vspipe -y /content/autogenerated.vpy - | ffmpeg -i pipe: -c:v hevc_nvenc -profile:v main10 -preset lossless -spatial_aq:v 1 -aq-strength 15 "/gdrive/My Drive/upscale.mkv"

#TODO: Figure out why vspipe's progress isn't showing up in colab.

Extra Functions


In [0]:
#@title Build ImageMagick and VapourSynth for Colab
#@markdown VapourSynth needs to be built for Python 3.6, and Imagemagick needs to be built for the VapourSynth imwri plugin. The setup script pulls from bintray, but this cell will rebuild and reinstall them if those debs don't work. 
#@markdown The built debs can be found in the "src" folder.

#Get some requirements for building
#Get some requirements for building
def getbuildstuff():
  """Install the toolchain (autotools, build-essential, pkg-config) and
  python build dependencies needed by the build cells below."""
  !apt-fast install software-properties-common autoconf automake libtool build-essential cython3 coreutils pkg-config
  !python3.6 -m pip install tesseract cython

#Build imagemagick, for imwri and local image manipulation, and create a deb
#Build imagemagick, for imwri and local image manipulation, and create a deb
def buildmagick():
  """Build ImageMagick 7.0.9-8 (HDRI, 32-bit quantum) from source and
  package/install it as a deb via checkinstall; the deb lands in /src."""
  makesrcd("imagemagick")
  !wget https://imagemagick.org/download/ImageMagick-7.0.9-8.tar.gz
  !tar xvzf ImageMagick-7.0.9-8.tar.gz
  %cd ImageMagick-7.0.9-8
  !./configure --enable-hdri=yes --with-quantum-depth=32
  !make -j 4 --quiet
  !sudo checkinstall -D --fstrans=no --install=yes --default --pakdir=/src --pkgname=imagemagick --pkgversion="8:7.0.9-8"
  !ldconfig /usr/local/lib

#Build vapoursynth for colab (python 3.6, Broadwell SIMD, etc.), and create a deb
def buildvs():
  makesrcd("vapoursynth")
  !wget https://github.com/vapoursynth/vapoursynth/archive/R48.tar.gz
  !tar -xf R48.tar.gz
  %cd vapoursynth-R48
  !./autogen.sh
  !./configure --enable-imwri
  !make -j 4 --quiet
  !sudo checkinstall -D --fstrans=no --install=yes --default --pakdir=/src --pkgname=vapoursynth --pkgversion=48
  !ldconfig /usr/local/lib
  
#Run the three build steps sequentially; the resulting debs are in /src
getbuildstuff()
buildmagick()
buildvs()

In [0]:
#@title MXnet
#@markdown This cell will pull pretrained models from https://github.com/WolframRhodium/Super-Resolution-Zoo
#@markdown For usage examples, see [this](https://github.com/WolframRhodium/muvsfunc/blob/master/Collections/examples/super_resolution_mxnet.vpy)
#@markdown and [this](https://github.com/WolframRhodium/Super-Resolution-Zoo/wiki/Explanation-of-configurations-in-info.md)
#Note that there's no release for the mxnet C++ plugin, and I can't get it to build in colab, but the header pulls and installs mxnet and the numpy super resolution function
#Pull the full model directory for the selected architecture from the Zoo
n = "ESRGAN" #@param {type:"string"}
!svn update --set-depth infinity  NeuralNetworks/{n}