In [1]:
import json
import shutil
from time import gmtime, strftime
from urllib.parse import urlencode

import requests
Enter a keyword and search Digital NZ content for it.
Filter out GIFs suitable for remixing.
Put the Digital NZ image for each location.
In [2]:
# Prompt for the Digital NZ search keyword; used to build the record-search
# query in the cells below.
keywodsea = input('dnz keyword: ')
In [3]:
from dominate.tags import *
import dominate
# Read the Digital NZ API key from a local text file.
# NOTE(review): hardcoded absolute path — consider an env var or config file.
# A context manager replaces the manual open/close, and the unused `oprdz`
# intermediate (it was overwritten before use in a later cell) is dropped.
with open('/home/wcmckee/Downloads/digital.txt', 'r') as opdigke:
    opstrip = opdigke.read().strip('\n')
# Load the scraped Ministry of Education jobs index — a JSON object keyed by
# stringified indexes ("0", "1", ...) — and count the entries.
# BUG FIX: the original left the file handle open; a `with` block closes it.
# NOTE(review): hardcoded absolute path — consider making it configurable.
with open('/home/wcmckee/github/wcmckee.com/output/minedujobs/index.json', 'r') as opets:
    oprdz = opets.read()  # raw JSON text; later cells re-read this variable

lenciv = len(json.loads(oprdz))
lenciv
# Collect the 'Location' field of every job entry.
# BUG FIX: the original called json.loads(oprdz) twice per loop iteration,
# re-parsing the whole document each time; parse it once up front.
minedujobs = json.loads(oprdz)
loorgin = list()
for lenves in range(0, lenciv):
    joblocation = minedujobs[str(lenves)]['Location']
    print(joblocation)
    loorgin.append(joblocation)
# Query Digital NZ for commercially reusable images of each unique job
# location and report how many results came back.
# BUG FIX: the original concatenated `looset` straight into the URL, so
# locations containing spaces or reserved characters produced broken
# requests. Passing `params` lets requests percent-encode everything.
for looset in set(loorgin):
    print(looset)
    reqseapi = requests.get(
        'http://api.digitalnz.org/v3/records.json',
        params={
            'api_key': opstrip,
            'and[category]': 'Images',
            'and[is_commercial_use]': 'True',
            'text': looset,
        },
    )
    lenresu = len(json.loads(reqseapi.text)['search']['results'])
    print(lenresu)
In [ ]:
# Build the record-search URL for the user's keyword.
# BUG FIX: the keyword came from input() and was concatenated into the URL
# unescaped; urlencode percent-encodes spaces and punctuation so the query
# survives the round trip intact.
apidig = ('http://api.digitalnz.org/v3/records.json?'
          + urlencode({
              'api_key': opstrip,
              'and[category]': 'Images',
              'and[is_commercial_use]': 'True',
              'text': keywodsea,
          }))
In [13]:
#Error from this
#for lenves in range(0, lenciv):
# print(json.loads(oprdz)[str(lenves)]['Location'])
# print(('http://api.digitalnz.org/v3/records.json?api_key=' + opstrip + '&and[is_commercial_use]=True&text=' + (json.loads(oprdz)[str(lenves)]['Location'])))
# reqapi = requests.get(apidig)
#print(reqapi.text)
# print(json.loads(reqapi.text)['search']['results'])
In [ ]:
In [ ]:
# Run the keyword search and keep the result list for the cells below.
# BUG FIX: the response body was json.loads()-ed twice (once for the
# landing_url peek, once for the list); parse it a single time.
reqapi = requests.get(apidig)
lenres = json.loads(reqapi.text)['search']['results']
rele = len(lenres)
# Guard: the original indexed [0] unconditionally and crashed on an
# empty result set.
if rele:
    print(lenres[0]['landing_url'])
# Print a summary of every search result and collect the thumbnail URLs
# for downloading in a later cell.
# Iterate over the records directly with enumerate instead of repeatedly
# indexing lenres[rea] (ten lookups per record in the original).
gifstrz = list()
for rea, resrec in enumerate(lenres):
    print(rea)
    print(resrec['title'])
    for contenpar in resrec['content_partner']:
        print(contenpar)
    print(resrec['created_at'])
    print(resrec['rights'])
    print(resrec['source_url'])
    print(resrec['thumbnail_url'])
    gifstrz.append(resrec['thumbnail_url'])
    print(resrec['category'])
    print(resrec['display_collection'])
In [20]:
# Number of thumbnail URLs collected in the previous cell.
glen = len(gifstrz)
In [21]:
glen
Out[21]:
# Download each thumbnail to its own numbered file under /home/wcmckee/gify/.
# BUG FIX: the original nested a `for gl in range(0, glen)` loop inside the
# per-URL loop, so every pass rewrote every file from a single response whose
# stream was consumed on the first write — leaving most files empty and all
# of them from the wrong URL. Pair one index with one URL and one download.
for gl, gifim in enumerate(gifstrz):
    print(gl)
    response = requests.get(gifim, stream=True)
    with open('/home/wcmckee/gify/' + str(gl) + '.gif', 'wb') as out_file:
        shutil.copyfileobj(response.raw, out_file)
# Build an HTML page of the search results with dominate.
# NOTE(review): the export destroyed this cell's indentation; the nesting
# below is reconstructed from the visible `with` statements and mirrors the
# getsdrawn pages this code links to — confirm against the original notebook.
dignzdoc = dominate.document(title='dignzgif')

with dignzdoc.head:
    link(rel='stylesheet', href='style.css')
    script(type='text/javascript', src='script.js')
    with div():
        attr(cls='header')
        h1('dignzgif ' + keywodsea)
        p(img('imgs/logo.svg', src='imgs/logo.svg'))
        h1('Updated ', strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()))

with dignzdoc:
    # Ordered list of result titles; thumbnails are only printed for now
    # (embedding them in the page was left unfinished in the original).
    with div(id='body').add(ol()):
        for rea in range(0, rele):
            print(rea)
            h3(lenres[rea]['title'])
            print(lenres[rea]['large_thumbnail_url'])
    # Footer with project links.
    with div():
        attr(cls='body')
        p('Gify is open source')
        a('https://github.com/getsdrawn/getsdrawndotcom')
        a('https://reddit.com/r/redditgetsdrawn')
In [ ]:
# Render the assembled HTML document to stdout.
print(dignzdoc)
In [ ]:
In [ ]: