In [4]:
repo_directory = '/Users/iaincarmichael/Dropbox/Research/law/law-net/'

data_dir = '/Users/iaincarmichael/Documents/courtlistener/data/'

import numpy as np
import sys
import matplotlib.pyplot as plt
from scipy.stats import rankdata
from collections import Counter

import discreteMarkovChain as dmc
from scipy.sparse import csr_matrix, dia_matrix


# graph package
import igraph as ig

# our code
sys.path.append(repo_directory + 'code/')
from setup_data_dir import setup_data_dir, make_subnetwork_directory
from pipeline.download_data import download_bulk_resource, download_master_edgelist, download_scdb
from helpful_functions import case_info

sys.path.append(repo_directory + 'vertex_metrics_experiment/code/')
from rankscore_experiment_sort import *
from rankscore_experiment_LR import *
from rankscore_experiment_search import *
from time_aware_pagerank import *


from make_tr_edge_df import *

# which network to download data for
network_name = 'scotus' # 'federal', 'ca1', etc


# some sub directories that get used
raw_dir = data_dir + 'raw/'
subnet_dir = data_dir + network_name + '/'
text_dir = subnet_dir + 'textfiles/'


# jupyter notebook settings
%load_ext autoreload
%autoreload 2
%matplotlib inline


The autoreload extension is already loaded. To reload it, use:
  %reload_ext autoreload

In [5]:
# load the scotus citation network from graphml
G = ig.Graph.Read_GraphML(subnet_dir + network_name +'_network.graphml')

In [6]:
# get a small subgraph to work with so the dense experiments below stay fast
np.random.seed(754) # other seeds tried: 234, 754
v = G.vs[np.random.choice(range(len(G.vs)))]

# all vertices within 2 hops of the sampled vertex
subset_ids = G.neighborhood(v.index, order=2)
g = G.subgraph(subset_ids)

print '%d nodes' % len(g.vs)

# dense adjacency matrix and integer publication years for the sample
A = np.array(g.get_adjacency().data)
years = np.array(g.vs['year']).astype(int)


2119 nodes

Setup


In [4]:
%%time

# cached full-network adjacency matrix; the commented lines show how it
# was built from igraph and saved once with np.save
# A = np.array(G.get_adjacency().data)
# years = np.array(G.vs['year']).astype(int)

# np.save('scotus_adjmat', A)
A = np.load('scotus_adjmat.npy')


CPU times: user 1.36 ms, sys: 8.1 s, total: 8.1 s
Wall time: 9.46 s

In [5]:
# publication year of every case in the full network
years = np.array(G.vs['year']).astype(int)

In [9]:
p = .85  # PageRank damping factor
qtv = .3  # time -> vertex transition weight (scales the TV block below)
qvt = .5  # vertex -> time transition weight (scales the VT block below)

Dense


In [ ]:
%%time

# number of vertices in the graph
n = A.shape[0]
outdegrees = A.sum(axis=1)  # citation out-degree of each vertex

# zero index the years
Y = np.array(years) - min(years)
m = max(Y) + 1  # number of year (time) states in the chain

In [ ]:
%%time
# number of cases per year; Counter only has keys for years that occur,
# so start from an all-zero list
cases_per_year = [0] * m
cases_per_year_counter = Counter(Y)
for k in cases_per_year_counter.keys():
    cases_per_year[k] = cases_per_year_counter[k]

In [ ]:
%%time
# PageRank transition matrix
# (see murphy 17.37)
# D: inverse out-degree on the diagonal (dangling nodes get 0)
D = np.diag([0 if d == 0 else 1.0/d for d in outdegrees])
# z: teleportation column; dangling nodes teleport with probability 1
z = [1.0/n if d == 0 else (1.0 - p) / n for d in outdegrees]
# PR = p * np.dot(A.T, D) + np.outer([1] * n, z)

In [ ]:
%%time
# random-walk part of PageRank: column j is vertex j's outgoing citation
# distribution (all zeros for dangling nodes)
PR = np.dot(A.T, D)

In [ ]:
%%time
# damping plus teleportation; every column of PR now sums to one
PR = p * PR + np.outer([1] * n, z)

In [ ]:
%%time
# Time-Time transition matrix
# ones below the diagonal: year t steps deterministically to year t + 1
# (the last column is all zeros; Qtv below compensates)
TT = np.zeros((m, m))
TT[1:m, :m-1] = np.diag([1] * (m - 1))

In [ ]:
%%time
# Vertex-Time transition matrix
# i-th column is the Y[i]th basis vector: from vertex i the walk jumps
# to vertex i's (zero-indexed) year
VT = np.zeros((m, n))
identity_m = np.eye(m)  # for basis vectors (also reused by the TV cell)
for i in range(n):
    VT[:, i] = identity_m[:, Y[i]]

In [ ]:
%%time
# Time-Vertex transition matrix
# VT transpose but entries are scaled by number of cases in the year
# (column k is uniform over the cases decided in year k)
TV = np.zeros((n, m))
# 1 over number of cases per year (0 for years with no cases)
n_inv = [0 if cases_per_year[i] == 0 else 1.0/cases_per_year[i]
         for i in range(m)]
for i in range(n):
    TV[i, :] = identity_m[Y[i], :] * n_inv[Y[i]]

In [ ]:
%%time
# normalization matrix for TV: empty years get 0 so their mass stays
# in the time chain
qtv_diag = [0 if cases_per_year[i] == 0 else qtv for i in range(m)]
qtv_diag[-1] = 1  # last column of TT is zeros, so return all mass to vertices
Qtv = np.diag(qtv_diag)

In [ ]:
%%time
# overall (n + m) x (n + m) transition matrix: first n states are the
# vertices, last m states are the years; each column sums to one
P = np.zeros((n + m, n + m))
P[:n, :n] = (1 - qvt) * PR  # upper left: vertex -> vertex
P[n:, :-m] = qvt * VT  # lower left: vertex -> time
P[:n, -m:] = np.dot(TV, Qtv)  # upper right: time -> vertex
P[-m:, -m:] = np.dot(TT, np.eye(m) - Qtv)  # lower right: time -> time

In [ ]:
# keep the dense construction around to compare with the sparse one below
P_dense = P

Sparse


In [7]:
%%time
# convert to sparse CSR once up front
# NOTE(review): this rebinds A (previously the dense array loaded above),
# so dense cells that use A must be re-run first -- hidden-state hazard
A = csr_matrix(A, dtype=int)


CPU times: user 7.46 s, sys: 6.65 s, total: 14.1 s
Wall time: 17.5 s

In [8]:
%%time

# number of vertices in the graph
n = A.shape[0]
# flatten the (n, 1) matrix returned by the sparse row-sum into a plain
# list of out-degrees (replaces the clunky .reshape(-1).tolist()[0])
outdegrees = np.asarray(A.sum(axis=1)).ravel().tolist()

# zero index the years
Y = np.array(years) - min(years)
m = max(Y) + 1  # number of year (time) states


CPU times: user 6.67 ms, sys: 4.17 ms, total: 10.8 ms
Wall time: 10.5 ms

In [9]:
%%time
# number of cases per year; Counter only has keys for years that occur,
# so start from an all-zero list
cases_per_year = [0] * m
cases_per_year_counter = Counter(Y)
for k in cases_per_year_counter.keys():
    cases_per_year[k] = cases_per_year_counter[k]


CPU times: user 34.2 ms, sys: 9.89 ms, total: 44.1 ms
Wall time: 46.5 ms

In [10]:
%%time
# PageRank transition matrix
# (see murphy 17.37)
# D = np.diag([0 if d == 0 else 1.0/d for d in outdegrees])
# D = dia_matrix([0 if d == 0 else 1.0/d for d in outdegrees],
#                shape=(n, n))
# TODO: csr or dia matrix?
# NOTE(review): np.diag materializes a dense n x n array before
# sparsifying; building the diagonal sparsely (e.g. scipy.sparse.diags
# or dia_matrix((vals, [0]), shape)) would avoid the O(n^2) memory spike
D = csr_matrix(np.diag([0 if d == 0 else 1.0/d for d in outdegrees]))

# teleportation column; dangling nodes teleport with probability 1
z = [1.0/n if d == 0 else (1.0 - p) / n for d in outdegrees]
# PR = p * np.dot(A.T, D) + np.outer([1] * n, z)


CPU times: user 9.52 s, sys: 5.62 s, total: 15.1 s
Wall time: 18.6 s

In [11]:
%%time
# PR = np.dot(A.T, D)

# sparse walk part of PageRank; the damping p and the (1 - qvt)
# vertex-block weight are folded in here, then densified once
PR = (1 - qvt) * p * (A.transpose() * D)
PR = PR.todense()


CPU times: user 194 ms, sys: 355 ms, total: 549 ms
Wall time: 577 ms

In [12]:
%%time
# add the (1 - qvt)-scaled teleportation term (dense rank-one update)
PR = PR + np.outer([1 - qvt] * n, z)


CPU times: user 5.09 s, sys: 18.1 s, total: 23.2 s
Wall time: 32.1 s

In [13]:
%%time
# Time-Time transition matrix
# ones below the diagonal: year t steps deterministically to year t + 1
TT = np.zeros((m, m))
TT[1:m, :m-1] = np.diag([1] * (m - 1))

# TODO: csr or dia matrix
TT = dia_matrix(TT)
# TT = csr_matrix(TT)


CPU times: user 2.17 ms, sys: 2.25 ms, total: 4.42 ms
Wall time: 8.84 ms

In [14]:
%%time
# Vertex-Time transition matrix
# i-th column is qvt times the Y[i]th basis vector (the qvt scaling is
# folded in here, unlike in the dense section above)
VT = np.zeros((m, n))
identity_m = np.eye(m)  # for basis vectors (also reused by the TV cell)
for i in range(n):
    VT[:, i] = qvt *identity_m[:, Y[i]]


CPU times: user 147 ms, sys: 28.8 ms, total: 176 ms
Wall time: 186 ms

In [15]:
%%time
# Time-Vertex transition matrix
# VT transpose but entries are scaled by number of cases in the year
# (column k is uniform over the cases decided in year k)
TV = np.zeros((n, m))
# 1 over number of cases per year (0 for years with no cases)
n_inv = [0 if cases_per_year[i] == 0 else 1.0/cases_per_year[i]
         for i in range(m)]
for i in range(n):
    TV[i, :] = identity_m[Y[i], :] * n_inv[Y[i]]

TV = csr_matrix(TV)


CPU times: user 183 ms, sys: 32.4 ms, total: 216 ms
Wall time: 229 ms

In [16]:
%%time
# normalization matrix for TV: qtv is the time -> vertex jump weight;
# empty years get 0 so their mass stays in the time chain
qtv_diag = [0 if cases_per_year[i] == 0 else qtv for i in range(m)]
qtv_diag[-1] = 1  # last column of TT is zeros
# Qtv = dia_matrix(np.diag(qtv_diag))
Qtv = csr_matrix(np.diag(qtv_diag))


CPU times: user 1.11 ms, sys: 64 µs, total: 1.18 ms
Wall time: 1.2 ms

In [17]:
%%time
# overall transition matrix, filled in block by block in the cells below
P = np.zeros((n + m, n + m))


CPU times: user 14 µs, sys: 27 µs, total: 41 µs
Wall time: 44.1 µs

In [18]:
%%time
# upper left: vertex -> vertex
# P[:n, :n] = (1 - qvt) * PR  
# (the (1 - qvt) factor is already folded into PR above)
P[:n, :n] = PR


CPU times: user 2.99 s, sys: 12.3 s, total: 15.3 s
Wall time: 19.8 s

In [19]:
%%time
# lower left: vertex -> time (qvt already folded into VT)
# P[n:, :-m] = qvt * VT  
P[n:, :-m] = VT


CPU times: user 24.9 ms, sys: 64.5 ms, total: 89.4 ms
Wall time: 138 ms

In [21]:
%%time
# upper right: time -> vertex, scaled by qtv via Qtv
P[:n, -m:] = (TV * Qtv).todense()


CPU times: user 54.1 ms, sys: 271 ms, total: 325 ms
Wall time: 505 ms

In [22]:
%%time
# lower right: time -> time, keeping the (1 - qtv) leftover mass
P[-m:, -m:] = (TT * dia_matrix(np.eye(m) - Qtv)).todense()


CPU times: user 4.81 ms, sys: 3.34 ms, total: 8.15 ms
Wall time: 7.38 ms

Create function


In [7]:
def get_time_aware_pagerank_matrix_sparse(A, years, p, qtv, qvt):
    """
    Build the time-aware PageRank transition matrix for a citation network.

    The chain has n vertex states followed by m year states.  From a vertex
    the walk either takes a damped PageRank step (probability 1 - qvt) or
    jumps to the vertex's publication year (probability qvt).  From a year
    it either jumps uniformly to one of that year's cases (probability qtv)
    or advances to the next year (probability 1 - qtv).

    Parameters
    ----------
    A: adjacency matrix (anything csr_matrix accepts); A[i, j] = 1 means
        vertex i cites vertex j
    years: publication year of each vertex, length n
    p: PageRank damping factor
    qtv: time -> vertex transition probability
    qvt: vertex -> time transition probability

    Returns
    -------
    (n + m) x (n + m) numpy array; the transpose of the column-stochastic
    matrix assembled below, so its rows sum to one.
    """
    A = csr_matrix(A, dtype=int)

    # number of vertices in the graph
    n = A.shape[0]
    outdegrees = np.asarray(A.sum(axis=1)).ravel()

    # zero index the years
    Y = np.array(years) - min(years)
    m = max(Y) + 1

    # number of cases per year (years with no cases stay at 0)
    cases_per_year = np.zeros(m)
    for year, count in Counter(Y).items():
        cases_per_year[year] = count

    # PageRank transition matrix (see murphy 17.37).
    # Build the inverse-degree diagonal directly as a sparse matrix;
    # going through np.diag would materialize an n x n dense array.
    d_inv = np.where(outdegrees > 0, 1.0 / np.maximum(outdegrees, 1), 0.0)
    D = dia_matrix((d_inv[np.newaxis, :], [0]), shape=(n, n)).tocsr()
    # teleportation column: dangling nodes teleport with probability 1
    z = np.where(outdegrees == 0, 1.0 / n, (1.0 - p) / n)
    # fold the damping p and the vertex-block weight (1 - qvt) in directly
    PR = (1 - qvt) * p * (A.transpose() * D)
    PR = PR.todense()
    PR = PR + np.outer([1 - qvt] * n, z)

    # Time-Time transition matrix: ones on the subdiagonal, i.e. year t
    # steps deterministically to year t + 1 (last column is all zeros)
    TT = np.zeros((m, m))
    TT[1:m, :m - 1] = np.eye(m - 1)
    TT = dia_matrix(TT)

    # Vertex-Time transition matrix: column i is qvt times the Y[i]-th
    # basis vector (vectorized; no Python loop over vertices)
    VT = np.zeros((m, n))
    VT[Y, np.arange(n)] = qvt

    # Time-Vertex transition matrix: column k is uniform over the cases
    # decided in year k (empty years give an all-zero column)
    n_inv = np.where(cases_per_year > 0,
                     1.0 / np.maximum(cases_per_year, 1), 0.0)
    TV = np.zeros((n, m))
    TV[np.arange(n), Y] = n_inv[Y]
    TV = csr_matrix(TV)

    # normalization matrix for TV: empty years get 0 so their mass stays
    # in the time chain
    qtv_diag = np.where(cases_per_year > 0, qtv, 0.0)
    qtv_diag[-1] = 1  # last column of TT is zeros, so return all mass to vertices
    Qtv = csr_matrix(np.diag(qtv_diag))

    # overall transition matrix: first n states are vertices, last m are
    # years; each column sums to one
    P = np.zeros((n + m, n + m))
    P[:n, :n] = PR  # upper left: vertex -> vertex
    P[n:, :n] = VT  # lower left: vertex -> time
    P[:n, n:] = (TV * Qtv).todense()  # upper right: time -> vertex
    P[n:, n:] = (TT * dia_matrix(np.eye(m) - Qtv)).todense()  # lower right: time -> time

    # transpose so rows (rather than columns) are probability distributions
    return P.T

In [ ]:


In [ ]:


In [12]:
%%time
# assemble the (n + m) x (n + m) time-aware transition matrix
P = get_time_aware_pagerank_matrix_sparse(A, years, p, qtv, qvt)


CPU times: user 174 ms, sys: 46.8 ms, total: 221 ms
Wall time: 229 ms

In [ ]:
%%time
# dense reference implementation (presumably from the
# `from time_aware_pagerank import *` at the top -- confirm)
P_dense = get_time_aware_pagerank_matrix(A, years, p, qtv, qvt)

In [19]:
# sanity check: sparse and dense constructions should agree entry-wise.
# Use the max absolute difference -- a plain sum() could report 0 even
# when positive and negative discrepancies cancel.
diff = P - P_dense
np.abs(diff).max()


Out[19]:
0.0

In [19]:
# NOTE(review): the function returns the transpose, whose ROWS sum to 1;
# the column sum of ~1 shown below suggests this cell ran while P was
# still the pre-transpose matrix from the cells above (the execution
# counts are out of order) -- re-check on a fresh kernel
sum(P[:, 0])


Out[19]:
1.0000000000000249

Solve for steady state


In [13]:
%%time
# stationary distribution via discreteMarkovChain, direct linear solve
# NOTE(review): P.T is passed -- confirm dmc's expected row/column
# stochastic orientation (rows of P sum to one as returned)
mc = dmc.markovChain(P.T)
mc.computePi('linear')
dmc_linear = mc.pi


CPU times: user 4.47 s, sys: 565 ms, total: 5.03 s
Wall time: 3.96 s

In [14]:
%%time
# same chain, Krylov-subspace solver for the stationary distribution
mc = dmc.markovChain(P.T)
mc.computePi('krylov')
dmc_krylov = mc.pi


CPU times: user 2.61 s, sys: 297 ms, total: 2.91 s
Wall time: 2.92 s

In [15]:
%%time 
# same chain, eigenvector solver for the stationary distribution
mc = dmc.markovChain(P.T)
mc.computePi('eigen')
dmc_eigen = mc.pi


CPU times: user 3.54 s, sys: 251 ms, total: 3.79 s
Wall time: 2.69 s

In [16]:
%%time
# leading eigenvector of P (project helper), normalized to sum to one
leading_eig = get_leading_evector(P)
leading_eig = leading_eig/sum(leading_eig)


CPU times: user 24.9 s, sys: 516 ms, total: 25.4 s
Wall time: 16.9 s

In [ ]: