Lesson 3

Data Quality


In [43]:
import csv
import re
import datetime

Quizzes


In [46]:
with open("autos.csv", "r") as f:
    reader = csv.DictReader(f)
    header = reader.fieldnames
    
    for row in reader:

        # print rows whose URI is not from dbpedia.org
        if not re.search("dbpedia", row["URI"]):
            print(row["URI"])

        # check if productionStartYear starts with a four-digit year
        if re.match(r"\d{4}", row["productionStartYear"]):

            # convert the year field from a datetime string to an int
            year = int(row["productionStartYear"][:4])

            # check if the year is in range 1886-2014
            if year not in range(1886, 2014 + 1):
                print(row["productionStartYear"])

        else:
            print(row["productionStartYear"])

URI
http://dbpedia.org/ontology/productionStartYear
URI
XMLSchema#gYear
http://www.w3.org/2002/07/owl#Thing
http://www.w3.org/2001/XMLSchema#gYear
2108-01-01T00:00:00+02:00
0001-01-01T00:00:00+02:00
NULL
NULL
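The audit flags the three metadata rows, two out-of-range years (2108, 0001) and two NULL values. Slicing off the first four characters is enough to turn a valid value into an int year, but the datetime module imported above can parse the full timestamp too. A minimal sketch (note that %z only accepts an offset written with a colon, like +02:00, on Python 3.7 and later):

value = "1989-01-01T00:00:00+02:00"  # sample productionStartYear value
parsed = datetime.datetime.strptime(value, "%Y-%m-%dT%H:%M:%S%z")
year = parsed.year                   # 1989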

In [2]:
"""
Your task is to check the "productionStartYear" of the DBPedia autos datafile for valid values.
The following things should be done:
- check if the field "productionStartYear" contains a year
- check if the year is in range 1886-2014
- convert the value of the field to be just a year (not full datetime)
- the rest of the fields and values should stay the same
- if the value of the field is a valid year in the range as described above,
  write that line to the output_good file
- if the value of the field is not a valid year as described above, 
  write that line to the output_bad file
- discard rows (neither write to good nor bad) if the URI is not from dbpedia.org
- you should use the provided way of reading and writing data (DictReader and DictWriter)
  They will take care of dealing with the header.

You can write helper functions for checking the data and writing the files, but we will call only the 
'process_file' with 3 arguments (inputfile, output_good, output_bad).
"""
import csv
import pprint
import re

INPUT_FILE = 'autos.csv'
OUTPUT_GOOD = 'autos-valid.csv'
OUTPUT_BAD = 'FIXME-autos.csv'

def process_file(input_file, output_good, output_bad):

    good_data = []
    bad_data = []

    with open(input_file, "r") as f:
        reader = csv.DictReader(f)
        header = reader.fieldnames

        # loop over rows in reader
        for row in reader:

            # discard rows if the URI is not from dbpedia.org
            if "dbpedia.org" not in row["URI"]:
                continue

            value = row["productionStartYear"]

            # check if productionStartYear starts with a four-digit year
            if re.match(r"\d{4}", value):

                # convert the year field from a datetime string to an int
                year = int(value[:4])

                # if the year is in range 1886-2014, the row is good
                if 1886 <= year <= 2014:
                    row["productionStartYear"] = year
                    good_data.append(row)
                else:
                    bad_data.append(row)

            # values without a leading year (e.g. "NULL") are bad
            else:
                bad_data.append(row)

    # write the valid rows to output_good and the invalid ones to output_bad
    for filename, rows in ((output_good, good_data), (output_bad, bad_data)):
        with open(filename, "w") as g:
            writer = csv.DictWriter(g, delimiter=",", fieldnames=header)
            writer.writeheader()
            for row in rows:
                writer.writerow(row)


def test():

    process_file(INPUT_FILE, OUTPUT_GOOD, OUTPUT_BAD)


if __name__ == "__main__":
    test()


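As a quick sanity check after running process_file, the good file can be read back to confirm that every surviving productionStartYear is an in-range year (a minimal sketch, assuming the cell above has been run and autos-valid.csv exists):

with open(OUTPUT_GOOD, "r") as f:
    for row in csv.DictReader(f):
        # every row written to the good file should carry a valid year
        assert 1886 <= int(row["productionStartYear"]) <= 2014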

Problem sets


In [94]:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
In this problem set you work with cities infobox data, audit it, come up with a
cleaning idea and then clean it up. In the first exercise we want you to audit
the datatypes that can be found in some particular fields in the dataset.
The possible types of values can be:
- NoneType if the value is a string "NULL" or an empty string ""
- list, if the value starts with "{"
- int, if the value can be cast to int
- float, if the value can be cast to float, but CANNOT be cast to int.
   For example, '3.23e+07' should be considered a float because it can be cast
   as float but int('3.23e+07') will throw a ValueError
- 'str', for all other values

The audit_file function should return a dictionary containing fieldnames and a 
SET of the types that can be found in the field. e.g.
{"field1": set([type(float()), type(int()), type(str())]),
 "field2": set([type(str())]),
  ....
}
The type() function returns a type object describing the argument given to the 
function. You can also use examples of objects to create type objects, e.g.
type(1.1) for a float: see the test function below for examples.

Note that the first three rows (after the header row) in the cities.csv file
are not actual data points. The contents of these rows should not be included
when processing data types. Be sure to include functionality in your code to
skip over or detect these rows.
"""
import codecs
import csv
import json
import pprint

CITIES = 'cities.csv'

FIELDS = ["name", "timeZone_label", "utcOffset", "homepage", "governmentType_label",
          "isPartOf_label", "areaCode", "populationTotal", "elevation",
          "maximumElevation", "minimumElevation", "populationDensity",
          "wgs84_pos#lat", "wgs84_pos#long", "areaLand", "areaMetro", "areaUrban"]

def audit_file(filename, fields):
    fieldtypes = {field: set() for field in fields}

    # open file
    with open(filename, "r") as f:
        reader = csv.DictReader(f)

        # loop over rows
        for ix, row in enumerate(reader):

            # skip the three metadata rows that follow the header
            if ix < 3:
                continue

            # audit each requested field
            for field in fields:
                value = row[field]

                # check for NoneType
                if value == "NULL" or value == "":
                    fieldtypes[field].add(type(None))

                # check for list
                elif value.startswith("{"):
                    fieldtypes[field].add(type(list()))

                # CSV values are always strings, so detect numbers by casting:
                # int first, then float, falling back to str
                else:
                    try:
                        int(value)
                        fieldtypes[field].add(type(int()))
                    except ValueError:
                        try:
                            float(value)
                            fieldtypes[field].add(type(float()))
                        except ValueError:
                            fieldtypes[field].add(type(str()))

    return fieldtypes

def test():
    fieldtypes = audit_file(CITIES, FIELDS)

    pprint.pprint(fieldtypes)

    assert fieldtypes["areaLand"] == set([type(1.1), type([]), type(None)])
    assert fieldtypes['areaMetro'] == set([type(1.1), type(None)])
    
if __name__ == "__main__":
    test()


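The int-versus-float rule in the docstring comes down to cast order: int('3.23e+07') raises ValueError while float('3.23e+07') succeeds, so int() has to be tried first. A standalone sketch of that dispatch (detect_type is a hypothetical helper, not part of the quiz):

def detect_type(value):
    # NULL and empty strings count as NoneType
    if value == "NULL" or value == "":
        return type(None)
    # braced values encode lists
    if value.startswith("{"):
        return type(list())
    # try int first; a value counts as float only when float() succeeds but int() fails
    try:
        int(value)
        return type(int())
    except ValueError:
        try:
            float(value)
            return type(float())
        except ValueError:
            return type(str())

print(detect_type("3.23e+07"))  # <class 'float'>
print(detect_type("320"))       # <class 'int'>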

In [100]:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
In this problem set you work with cities infobox data, audit it, come up with a
cleaning idea and then clean it up.

Since in the previous quiz you made a decision on which value to keep for the
"areaLand" field, you now know what has to be done.

Finish the function fix_area(). It will receive a string as an input, and it
has to return a float representing the value of the area or None.
You have to change the function fix_area. You can use extra functions if you
like, but changes to process_file will not be taken into account.
The rest of the code is just an example on how this function can be used.
"""
import codecs
import csv
import json
import pprint

CITIES = 'cities.csv'


def fix_area(area):

    try:
        # coerce string to float
        area = float(area)
        
    except ValueError:
        
        # omit NULL and empty strings
        if area == "NULL" or area == "":
            area = None
        
        else:
            # parse area values from list
            area_stripped = area.strip("{").strip("}").split("|")
        
            # compare the two area values and keep the longer (more precise) one
            if len(area_stripped[0]) > len(area_stripped[1]):
                area = float(area_stripped[0])
            else:
                area = float(area_stripped[1])

    return area

def process_file(filename):
    # CHANGES TO THIS FUNCTION WILL BE IGNORED WHEN YOU SUBMIT THE EXERCISE
    data = []

    with open(filename, "r") as f:
        reader = csv.DictReader(f)

        #skipping the extra metadata
        for i in range(3):
            l = next(reader)

        # processing file
        for line in reader:
            # calling your function to fix the area value
            if "areaLand" in line:
                line["areaLand"] = fix_area(line["areaLand"])
            data.append(line)

    return data


def test():
    data = process_file(CITIES)

    print("Printing three example results:")
    for n in range(5,8):
        pprint.pprint(data[n]["areaLand"])

    assert data[3]["areaLand"] == None        
    assert data[8]["areaLand"] == 55166700.0
    assert data[20]["areaLand"] == 14581600.0
    assert data[33]["areaLand"] == 20564500.0    


if __name__ == "__main__":
    test()


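A few illustrative calls, using hypothetical input strings rather than actual rows from cities.csv, exercise the three branches of fix_area:

print(fix_area("55166700.0"))                 # 55166700.0
print(fix_area("NULL"))                       # None
print(fix_area("{3.401918e+07|3.4019e+07}"))  # 34019180.0, the longer value wins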

In [122]:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
In this problem set you work with cities infobox data, audit it, come up with a
cleaning idea and then clean it up.

In the previous quiz you recognized that the "name" value can be an array (or
list in Python terms). It would make it easier to process and query the data
later if all values for the name are in a Python list, instead of being
just a string separated with special characters, like now.

Finish the function fix_name(). It will receive a string as an input, and it
will return a list of all the names. If there is only one name, the list will
have only one item in it; if the name is "NULL", the list should be empty.
The rest of the code is just an example on how this function can be used.
"""
import codecs
import csv
import pprint

CITIES = 'cities.csv'


def fix_name(name):
    
    # check if name is equal to NULL or empty
    if name == "NULL" or name == "":
        name = []
    
    else:
        # parse name list
        name = name.strip("{").strip("}").strip(" ").split("|")

    return name


def process_file(filename):
    data = []
    with open(filename, "r") as f:
        reader = csv.DictReader(f)
        #skipping the extra metadata
        for i in range(3):
            l = next(reader)
        # processing file
        for line in reader:
            # calling your function to fix the name value
            if "name" in line:
                line["name"] = fix_name(line["name"])
            data.append(line)
    return data


def test():
    data = process_file(CITIES)

    print("Printing 20 results:")
    for n in range(20):
        pprint.pprint(data[n]["name"])

    assert data[14]["name"] == ['Negtemiut', 'Nightmute']
    assert data[9]["name"] == ['Pell City Alabama']
    assert data[3]["name"] == ['Kumhari']

if __name__ == "__main__":
    test()


Printing 20 results:
['Kud']
['Kuju']
['Kumbhraj']
['Kumhari']
['Kunigal']
['Kurgunta']
['Athens']
['Demopolis']
['Chelsea Alabama']
['Pell City Alabama']
['City of Northport']
['Sand Point']
['Unalaska Alaska']
['City of Menlo Park']
['Negtemiut', 'Nightmute']
['Fairbanks Alaska']
['Homer']
['Ketchikan Alaska']
['Nuniaq', 'Old Harbor']
['Rainier Washington']
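fix_name and fix_area both begin by unpacking the same "{a|b}" multi-value encoding, so the brace handling could be factored into a shared helper. A sketch under that assumption (parse_multivalue is a hypothetical name, not part of the quiz code):

def parse_multivalue(value):
    # split a "{a|b}" string into its parts; a plain value becomes a one-item list
    return value.strip("{}").strip().split("|")

print(parse_multivalue("{Negtemiut|Nightmute}"))  # ['Negtemiut', 'Nightmute']
print(parse_multivalue("Kud"))                    # ['Kud']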

In [134]:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
In this problem set you work with cities infobox data, audit it, come up with a
cleaning idea and then clean it up.

If you look at the full city data, you will notice that there are a couple of
values that seem to provide the same information in different formats: "point"
seems to be the combination of "wgs84_pos#lat" and "wgs84_pos#long". However,
we do not know if that is the case and should check if they are equivalent.

Finish the function check_loc(). It will receive 3 strings: first, the combined
value of "point", followed by the separate "wgs84_pos#" values. You have to
extract the lat and long values from the "point" argument and compare them to
the "wgs84_pos#" values, returning True or False.

Note that you do not have to fix the values, only determine if they are
consistent. To fix them in this case you would need more information. Feel free
to discuss possible strategies for fixing this on the discussion forum.

The rest of the code is just an example on how this function can be used.
Changes to "process_file" function will not be taken into account for grading.
"""
import csv
import pprint

CITIES = 'cities.csv'


def check_loc(point, lat, longi):

    # parse the lat and long values out of the combined "point" string
    point_lat, point_long = point.split(" ")

    # compare whether the parsed values equal lat and longi
    return point_lat == lat and point_long == longi


def process_file(filename):
    data = []
    with open(filename, "r") as f:
        reader = csv.DictReader(f)
        #skipping the extra metadata
        for i in range(3):
            l = next(reader)
        # processing file
        for line in reader:
            # calling your function to check the location
            result = check_loc(line["point"], line["wgs84_pos#lat"], line["wgs84_pos#long"])
            if not result:
                print("{}: {} != {} {}".format(line["name"], line["point"], line["wgs84_pos#lat"], line["wgs84_pos#long"]))
            data.append(line)

    return data


def test():
    assert check_loc("33.08 75.28", "33.08", "75.28") == True
    assert check_loc("44.57833333333333 -91.21833333333333", "44.5783", "-91.2183") == False

if __name__ == "__main__":
    test()
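One caveat: check_loc compares the values as strings, which is exactly what the asserts exercise, but it would flag "33.08" and "33.080" as inconsistent even though they are numerically equal. A numeric variant is a small change (check_loc_numeric is a hypothetical helper; the 1e-9 tolerance is an arbitrary choice):

def check_loc_numeric(point, lat, longi):
    # compare as floats so trailing zeros or exponent notation do not matter
    point_lat, point_long = (float(v) for v in point.split(" "))
    return abs(point_lat - float(lat)) < 1e-9 and abs(point_long - float(longi)) < 1e-9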