This notebook sorts German Wikipedia section headers by the number of articles they appear in, as part of this research project.
In [1]:
import numpy as np
import pandas as pd
In [2]:
# read the headers TSV in 100,000-row chunks to keep memory usage bounded
# https://stackoverflow.com/questions/25962114/how-to-read-a-6-gb-csv-file-with-pandas
column_dtypes = {
    'page_id': np.int32,
    'page_title': object,
    'page_ns': np.int16,
    'heading_level': np.int8,
    'heading_text': object,
}
tp = pd.read_csv('dewiki_20161101_headings_3.tsv', sep='\t', header=0,
                 dtype=column_dtypes, iterator=True, chunksize=100000)
In [3]:
# stitch the chunk iterator back together into one DataFrame;
# pd.concat accepts any iterable, so no list comprehension is needed
de_DF = pd.concat(tp)
In [4]:
# sanity check: preview the first rows of the concatenated frame
de_DF.head()
Out[4]:
In [5]:
# distinct page namespaces present in the data
de_DF['page_ns'].unique()
Out[5]:
In [6]:
# determine number of unique articles with sections
# nunique() is the idiomatic (and faster) equivalent of len(unique());
# dropna=False keeps parity with unique(), which includes NaN in the count
de_DF['page_title'].nunique(dropna=False)
Out[6]:
In [7]:
# remove leading and trailing whitespace from the heading_text column
# NOTE: the original used pd.core.strings.str_strip, a private internal
# helper that was removed from pandas; Series.str.strip() is the public API
# and produces the same result
de_DF['heading_text'] = de_DF['heading_text'].str.strip()
In [8]:
# for each heading, count the number of distinct articles it appears in,
# sorted in descending order; the result is a pandas Series
# SeriesGroupBy.nunique replaces apply(lambda x: len(x.unique())) — same
# result (dropna=False counts NaN titles, matching len(unique())) but
# avoids a Python-level function call per group
article_count = (
    de_DF.groupby('heading_text')['page_title']
    .nunique(dropna=False)
    .sort_values(ascending=False)
)
In [9]:
# convert the Series (heading -> count) into a two-column DataFrame
# with columns section_title, number_of_articles
de_article_count_DF = (
    article_count
    .rename_axis('section_title')
    .reset_index(name='number_of_articles')
)
In [10]:
# add a column for the percentage of articles each header appears in
# derive the denominator from the data instead of hard-coding 1993198
# (the unique-article count computed earlier); same value for this dump,
# but stays correct if the input file changes
total_articles = len(de_DF['page_title'].unique())
de_article_count_DF['article_percentage'] = (
    de_article_count_DF['number_of_articles'] / total_articles
) * 100
In [11]:
# allow up to 100 rows so the full top-100 listing renders
pd.set_option('display.max_rows', 100)
# top 100 headers, with the percentage rounded to 2 decimal places
de_article_count_DF.round({'article_percentage': 2}).head(100)
Out[11]:
In [ ]: