In [1]:
from IPython.display import HTML,display
from pybtex.database.input import bibtex
import re

def bibtex_raw(file="pubs.bib"):
    """Read a .bib file and return {citation_key: raw entry text}.

    Lines mentioning "abstract" or "file" are dropped so the raw BibTeX shown
    on the website stays short and does not leak local file paths.

    Parameters
    ----------
    file : str
        Path to the BibTeX file. (Bug fix: the original ignored this
        argument and always opened "pubs.bib".)

    Returns
    -------
    dict
        Maps each citation key to the raw text of its entry.
    """
    with open(file, "r") as f:
        lineas = f.readlines()

    # NOTE(review): substring filter — a line whose *content* contains the
    # word "file" or "abstract" is also dropped; acceptable for this dataset.
    lineas = [l for l in lineas if ("abstract" not in l) and ("file" not in l)]
    # (line index, citation key) for every entry header line "@type{key,".
    positions_start = [(i, re.search(r'\{(.*),', l).groups()[0])
                       for i, l in enumerate(lineas) if l.startswith('@')]
    # Slice between consecutive headers; the "-1" drops the blank separator
    # line that precedes the next entry.
    cadena_bibtex = ["".join(lineas[positions_start[i][0]:(positions_start[i + 1][0] - 1)])
                     for i in range(len(positions_start) - 1)]
    # Last entry runs to the end of the file.
    cadena_bibtex.append("".join(lineas[positions_start[-1][0]:]))

    dicts_bibtex = dict([(pi[1], c) for pi, c in zip(positions_start, cadena_bibtex)])
    return dicts_bibtex

# Parse the full BibTeX database with pybtex for structured access to
# fields and authors; bibtex_raw() above keeps the raw entry text instead.
# `bib_data` is used by every cell below.
filebib = 'pubs.bib'
parser = bibtex.Parser()
bib_data = parser.parse_file(filebib)


def make_cita(bibentry):
    """Build a short citation label: author surname(s) followed by the year.

    One author -> "Surname YYYY"; two -> "A and B YYYY"; three or more ->
    "A et al. YYYY".
    """
    surnames = [" ".join(person.last_names)
                for person in bibentry.persons.get('author')]
    if len(surnames) > 2:
        label = surnames[0] + " et al."
    elif len(surnames) == 2:
        label = " and ".join(surnames)
    else:
        label = surnames[0]
    return label + " " + str(bibentry.fields.get('year'))

# Build an HTML list of citation labels, sorted by publication year,
# linking each to the paper's URL when one is available.
string = ""
for bibentry_key in sorted(bib_data.entries.keys(), key=lambda x: bib_data.entries[x].fields["year"]):
    bibentry = bib_data.entries[bibentry_key]
    cita = make_cita(bibentry)
    if "url" in bibentry.fields:
        # Fix: "</br>" is not a valid HTML tag; a line break is "<br/>".
        string += "<a href='%s'>[%s]</a><br/>" % (bibentry.fields["url"], cita)
    else:
        string += "[%s]<br/>" % cita


HTML(string)


Out[1]:
[Mateo-García 2012][Gómez-Chova et al. 2017][Mateo-García et al. 2017][Iannone et al. 2017][Munoz-Mari et al. 2017][Pérez-Suay et al. 2017][Gómez-Chova et al. 2017][Mateo-García et al. 2017][Mateo-García et al. 2017][Ruescas et al. 2018][Mateo-García et al. 2018][Mateo-García et al. 2018][Mateo-García and Gómez-Chova 2018][Ruescas et al. 2018][Wolanin et al. 2019][Mateo-Garcia et al. 2019][Mateo-García et al. 2019][Mateo-García et al. 2019][Camps-Valls et al. 2020][Mateo-García et al. 2020][Wolanin et al. 2020][Mateo-García et al. 2020]

In [13]:
# bib_data

In [10]:
# Inspect the citation keys pybtex found in the database.
bib_data.entries.keys()


Out[10]:
['gomez-chova_cloud_2017',
 'mateo-garcia_advances_2017',
 'iannone_proba-v_2017',
 'munoz-mari_hyperlabelme_2017',
 'perez-suay_fair_2017',
 'gomez-chova_cloud_2017-1',
 'mateo-garcia_cloud_2017',
 'mateo-garcia_convolutional_2017',
 'ruescas_machine_2018',
 'mateo-garcia_multitemporal_2018',
 'mateo-garcia_master_2012',
 'mateo-garcia_optimizing_2018',
 'mateo-garcia_convolutional_2018',
 'wolanin_estimating_2019',
 'ruescas_retrieval_2018',
 'mateo-garcia_flood_2019',
 'camps-valls_chapter_2020',
 'mateo-garcia_domain_2019',
 'mateo-garcia_convolutional_2019',
 'mateo-garcia_transferring_2020',
 'wolanin_estimating_2020']

In [36]:
# Scratch: grab one entry and its first author to inspect pybtex's Person API.
bibentry = bib_data.entries['mateo-garcia_transferring_2020']
p = bibentry.persons["author"][0]

In [45]:



Out[45]:
'Transferring deep learning models for cloud detection between Landsat-8 and Proba-V'

In [25]:
# pybtex Person exposes name parts as lists (see Out[25]: ['Mateo-García']).
p.last_names


Out[25]:
['Mateo-García']

In [5]:
from datetime import datetime

# Collect, per BibTeX key, everything the jinja2 template needs:
# abstract, title, venue fields, raw bibtex, formatted author list and date.
extra = dict()
bibtexsraw = bibtex_raw()
for k in bib_data.entries.keys():
    bibentry = bib_data.entries[k]
    print(k)
    extra[k] = dict()
    extra[k]["abstract"] = bibentry.fields["abstract"]
    extra[k]["title"] = bibentry.fields["title"].replace("{", "").replace("}", "")

    # Optional fields: copy only when present, stripping BibTeX braces.
    copyifkeys = ["journal", "url", "doi", "booktitle"]
    for ci in copyifkeys:
        if ci in bibentry.fields:
            extra[k][ci] = bibentry.fields.get(ci).replace("{", "").replace("}", "")

    # Wrap the raw entry so jinja2 does not interpret its braces.
    extra[k]["bibtex"] = "{%% raw %%} %s {%% endraw %%}" % bibtexsraw[k]

    persons = bibentry.persons.get('author')
    persons_print = [str(person) for person in persons]
    authors = []
    # Variants of the site owner's name, underlined in the rendered list.
    own_names = {"Gonzalo Mateo-Garcia", "Gonzalo Mateo-García",
                 "G. Mateo-García", "G. Mateo-Garcia"}
    for aut in persons_print:
        # pybtex renders a person as "Last, First"; split on the FIRST comma
        # only, so suffix names ("Smith, Jr., John") no longer raise ValueError.
        surname, name = aut.split(",", 1)
        # Fix: strip() instead of replace(" ", "") — the old code deleted the
        # internal spaces of multi-word given names (e.g. "José Luis").
        nombre_bien = name.strip() + " " + surname.strip()
        if nombre_bien in own_names:
            nombre_bien = "<u>%s</u>" % nombre_bien
        authors.append(nombre_bien)

    if len(authors) > 1:
        extra[k]["authors"] = ", ".join(authors[:-1]) + " and " + authors[-1]
    else:
        extra[k]["authors"] = authors[-1]

    # Publication date at month resolution ("%B" expects the full month name,
    # which is how pybtex exposes the "month" field here).
    year_month = bibentry.fields["year"] + " " + bibentry.fields["month"]
    dat = datetime.strptime(year_month + " 01", "%Y %B %d")
    extra[k]["date"] = dat

# extra


gomez-chova_cloud_2017
mateo-garcia_advances_2017
iannone_proba-v_2017
munoz-mari_hyperlabelme_2017
perez-suay_fair_2017
gomez-chova_cloud_2017-1
mateo-garcia_cloud_2017
mateo-garcia_convolutional_2017
ruescas_machine_2018
mateo-garcia_multitemporal_2018
mateo-garcia_master_2012
mateo-garcia_optimizing_2018
mateo-garcia_convolutional_2018
wolanin_estimating_2019
ruescas_retrieval_2018
mateo-garcia_flood_2019
camps-valls_chapter_2020
mateo-garcia_domain_2019
mateo-garcia_convolutional_2019
mateo-garcia_transferring_2020
wolanin_estimating_2020
mateo-garcia_cross-sensor_2020

In [7]:
import jinja2
import os

# Publications ordered newest-first for the rendered page.
ordered_keys = sorted(extra.keys(), key=lambda k: extra[k]["date"], reverse=True)

# Hand-curated supplementary material (slides, posters, pdf, code,
# visualizations, ...) keyed by citation key, merged into `extra` below.
supplementary = {
    "mateo-garcia_convolutional_2017": {
        "slides": "https://www.dropbox.com/s/v5mf77lcbzeqq35/1640_cnn_MateoGarcia.pdf?raw=1",
    },
    "mateo-garcia_convolutional_2018": {
        "slides": "https://www.dropbox.com/s/pjl7uj0b88w7l03/cnn_probav_igarss.pdf?raw=1",
        "poster": "https://www.dropbox.com/s/zdhkne7rgttomgs/conference_poster_6.pdf?raw=1",
        "pdf": "https://www.dropbox.com/s/501bwxdagoey76k/Mateo18bigarss.pdf?raw=1",
    },
    "mateo-garcia_optimizing_2018": {
        "slides": "https://www.dropbox.com/s/k6gsnm0vi5t6qjm/OKRR_IGARSS18.pdf?raw=1",
        "pdf": "https://www.dropbox.com/s/853ijwutacyd6to/Mateo18aigarss.pdf?raw=1",
        "code": "https://github.com/gonzmg88/obfkrr",
    },
    "mateo-garcia_cloud_2017": {
        "poster": "https://www.dropbox.com/s/mzx83weuvqpsalv/conference_poster_6.pdf?raw=1",
        "code": "https://github.com/IPL-UV/ee_ipl_uv",
    },
    "mateo-garcia_advances_2017": {
        "poster": "https://www.dropbox.com/s/or8l3r15i8unvgx/conference_poster_6.pdf?raw=1",
    },
    "mateo-garcia_master_2012": {
        "booktitle": "Master Thesis",
    },
    "mateo-garcia_multitemporal_2018": {
        "code": "https://github.com/IPL-UV/ee_ipl_uv",
        "visualization": "http://isp.uv.es/projects/cdc/viewer_l8_GEE.html",
    },
    "gomez-chova_cloud_2017": {
        "code": "https://github.com/IPL-UV/ee_ipl_uv",
        "visualization": "http://isp.uv.es/projects/cdc/GEE_cloud_detection_results.html",
    },
    "ruescas_retrieval_2018": {
        "code": "https://github.com/IPL-UV/mlregocean",
    },
    "ruescas_machine_2018": {
        "code": "https://github.com/IPL-UV/mlregocean",
    },
    "mateo-garcia_convolutional_2019": {
        "slides": "https://www.dropbox.com/s/eywdhfnmk3xklw5/IGARSS2019_ConvLSTM.pptx?dl=0",
        "pdf": "https://www.dropbox.com/s/ajx9jonwhq1nzpq/Mateo19bigarss.pdf?raw=1",
    },
    "mateo-garcia_flood_2019": {
        "poster": "https://drive.google.com/open?id=1Xw42E2tUAycX_oX1GfZEEKSyc_n0yzT3",
        "slides": "https://docs.google.com/presentation/d/1QOIqB8i8oTy67FJQ0DT1V6ntFYt338-ntjbmRcfljIY/edit?usp=sharing",
    },
    "mateo-garcia_domain_2019": {
        "slides": "https://www.dropbox.com/scl/fi/pecrgyarsmyvbfmqv5q8r/IGARSS2019_DA-GAN.pptx?dl=0&rlkey=gioyu5s5k9q936hrwhyqgyzsy",
    },
    "mateo-garcia_transferring_2020": {
        "pdf": "https://www.dropbox.com/s/z1pz9mxb6i18ebk/Transfer_ISPRS_v3.pdf?raw=1",
        "code": "https://github.com/IPL-UV/pvl8dagans",
    },
    "mateo-garcia_cross-sensor_2020": {
        "visualization": "https://isp.uv.es/projects/cloudsat/pvl8dagans/",
        "code": "https://github.com/IPL-UV/pvl8dagans",
    },
}
for entry_key, links in supplementary.items():
    extra[entry_key].update(links)


def get_template(tpl_path):
    """Load a jinja2 template from a (possibly relative) file path.

    Splits the path into directory and filename, builds an Environment
    whose loader looks in that directory (or "./" when the path has no
    directory part), and returns the compiled template.
    """
    directory, filename = os.path.split(tpl_path)
    loader = jinja2.FileSystemLoader(directory if directory else './')
    env = jinja2.Environment(loader=loader)
    return env.get_template(filename)

tpl = get_template("template_pubs.tpl")

# Render the publications page. Fix: write with explicit utf-8 so accented
# author names (e.g. "Mateo-García") survive on non-UTF-8 default locales.
with open("index.html", "w", encoding="utf-8") as s:
    tpl.stream(extra=extra, ordered_keys=ordered_keys, absolute_rel_path="").dump(s)

# Fix: the open-in-new-tab target is '_blank' (with underscore), not 'blank'.
display(HTML("<a href='http://localhost:4000/publications/index.html' target='_blank'>link</a>"))