# In [10]:
from urllib.request import urlopen
from bs4 import BeautifulSoup
from datetime import datetime, date, time
import csv
import webbrowser
import pandas as pd
def open_browser(url='https://www.maxanet.com/cgi-bin/mnhistory.cgi?rosen316/1'):
    """Open an auction history page in the default web browser.

    Args:
        url: Page to open. Defaults to the rosen316 item-history page,
            preserving the original hard-coded behavior.
    """
    webbrowser.open(url)

# Module-level side effect kept from the original script: open the
# default page immediately on import/run.
open_browser()
def getdate():
    """Print and return the current local time.

    Format example: "Monday, 01 January 2024 09:30AM".

    Returns:
        str: The formatted timestamp (also printed to stdout).
    """
    # Renamed from `time`: the original shadowed the `time` class
    # imported from datetime at the top of the file.
    now = datetime.now()
    dt = now.strftime("%A, %d %B %Y %I:%M%p")
    print(dt)
    # Return the string so callers can use it, not just read stdout.
    return dt
# Fetch the auction item-history page and write its DataTable rows to
# itemhistory.csv, one comma-joined line per <tr>.
html = urlopen("https://www.maxanet.com/cgi-bin/mnhistory.cgi?rosen316/1")
soup = BeautifulSoup(html, "lxml")
dtable = soup.find('table', id='DataTable').findAll('tr')
# Build each line from the <td> cell texts, joining once instead of the
# original quadratic string concatenation.
rows = [",".join(cell.text for cell in record.findAll('td'))
        for record in dtable]
# The leading " " element reproduces the original file's blank-ish first
# line, which the downstream read_csv(..., skiprows=1) step expects.
# Use a context manager so the file is closed even if the write fails.
with open("itemhistory.csv", 'w') as file:
    file.write("\n".join([" "] + rows))
# In [11]:
# Load the scraped history; skiprows=1 drops the blank first line the
# scraper writes. NOTE(review): header=[1] then uses the *second*
# remaining row as the header, silently discarding the row before it —
# confirm this is intentional (header=0 may have been meant).
ihist = pd.read_csv('itemhistory.csv', header=[1],skiprows=1)
ihist.head()
# In [13]:
# Inspect (rows, columns) of the loaded history table.
ihist.shape
# Out[13]:
# In [ ]:
#iterate through items until there are no more items.