Just an alpha version: no automatic triangulation of a space yet, plus some minor bugs still to be solved.
Some more inspiration at https://triangleinequality.wordpress.com/2014/01/23/computing-homology/ (probably a better algorithm; I need to study it and take some inspiration from it for the final version).
In [6]:
import numpy
def rowSwap(A, i, j):
    """Swap rows i and j of the numpy array A in place."""
    # Copy is required: a plain slice would be a view into A and the
    # subsequent assignment would clobber it.
    temp = numpy.copy(A[i, :])
    A[i, :] = A[j, :]
    A[j, :] = temp
def colSwap(A, i, j):
    """Swap columns i and j of the numpy array A in place."""
    # Copy is required: a plain slice would be a view into A and the
    # subsequent assignment would clobber it.
    temp = numpy.copy(A[:, i])
    A[:, i] = A[:, j]
    A[:, j] = temp
def scaleCol(A, i, c):
    """Multiply column i of A by the scalar c, in place.

    Note: assignment casts back to A's dtype, so scaling an integer
    matrix by a non-integer c truncates.
    """
    A[:, i] = numpy.multiply(A[:, i], c * numpy.ones(A.shape[0]))
def scaleRow(A, i, c):
    """Multiply row i of A by the scalar c, in place.

    Note: assignment casts back to A's dtype, so scaling an integer
    matrix by a non-integer c truncates.
    """
    A[i, :] = numpy.multiply(A[i, :], c * numpy.ones(A.shape[1]))
def colCombine(A, addTo, scaleCol, scaleAmt):
    """In place: column `addTo` of A += scaleAmt * column `scaleCol`.

    (The parameter name `scaleCol` shadows the module-level helper of the
    same name, but only inside this function; kept for caller compatibility.)
    """
    A[:, addTo] += scaleAmt * A[:, scaleCol]
def rowCombine(A, addTo, scaleRow, scaleAmt):
    """In place: row `addTo` of A += scaleAmt * row `scaleRow`.

    (The parameter name `scaleRow` shadows the module-level helper of the
    same name, but only inside this function; kept for caller compatibility.)
    """
    A[addTo, :] += scaleAmt * A[scaleRow, :]
In [7]:
def simultaneousReduce(A, B):
    """Column-reduce A in place, mirroring each column operation on A with
    the INVERSE row operation on B.

    If A and B are consecutive boundary matrices (A = d_k, B = d_{k+1}),
    every column op on A is a change of basis P of the k-chains
    (A -> A @ P), so B must transform as B -> inv(P) @ B to keep
    representing the same map in the new basis.

    Both arrays are modified in place and also returned as (A, B).
    Raises Exception when A's column count differs from B's row count.
    """
    if A.shape[1] != B.shape[0]:
        raise Exception("Matrices have the wrong shape.")

    numRows, numCols = A.shape  # col reduce A
    i, j = 0, 0
    while i < numRows and j < numCols:
        if A[i, j] == 0:
            # Scan right along row i for a nonzero entry to swap into
            # the pivot position.
            nonzeroCol = j
            while nonzeroCol < numCols and A[i, nonzeroCol] == 0:
                nonzeroCol += 1
            if nonzeroCol == numCols:
                i += 1  # row i is zero from column j on; move to next row
                continue
            # Column swap on A; a swap is its own inverse, so mirror it
            # with the same swap applied to B's rows.
            A[:, [j, nonzeroCol]] = A[:, [nonzeroCol, j]]
            B[[j, nonzeroCol], :] = B[[nonzeroCol, j], :]

        pivot = A[i, j]
        A[:, j] = A[:, j] * (1.0 / pivot)
        # BUG FIX: the inverse of scaling a column by 1/pivot is scaling
        # the corresponding row by pivot.  The original scaled B's row by
        # 1/pivot too, which is only correct when pivot is +-1.
        B[j, :] = B[j, :] * pivot

        # Eliminate every other nonzero entry in pivot row i.
        for otherCol in range(numCols):
            if otherCol == j or A[i, otherCol] == 0:
                continue
            scaleAmt = -A[i, otherCol]
            A[:, otherCol] += scaleAmt * A[:, j]   # col op: C_other += s*C_j
            B[j, :] += -scaleAmt * B[otherCol, :]  # inverse row op on B

        i += 1
        j += 1
    return A, B
In [8]:
def numPivotCols(A):
    """Count the nonzero columns of A.

    For a column-reduced matrix this equals the rank (number of pivot
    columns); for an arbitrary matrix it is only an upper bound.
    """
    z = numpy.zeros(A.shape[0])
    return [numpy.all(A[:, j] == z) for j in range(A.shape[1])].count(False)
def numPivotRows(A):
    """Count the nonzero rows of A.

    For a row-reduced matrix this equals the rank (number of pivot rows);
    for an arbitrary matrix it is only an upper bound.
    """
    z = numpy.zeros(A.shape[1])
    return [numpy.all(A[i, :] == z) for i in range(A.shape[0])].count(False)
In [11]:
def bettiNumber(d_k, d_kplus1):
    """Return the k-th Betti number dim(ker d_k) - dim(im d_{k+1}).

    d_k and d_kplus1 are the boundary matrices of consecutive dimensions;
    they are copied, so the caller's arrays are not modified.  The three
    intermediate dimensions are printed as debug output.
    """
    A, B = numpy.copy(d_k), numpy.copy(d_kplus1)
    simultaneousReduce(A, B)

    dimKChains = A.shape[1]
    print(dimKChains)
    # A is column-reduced, so its nonzero columns count its rank.
    kernelDim = dimKChains - numPivotCols(A)
    print(kernelDim)
    # BUG FIX: the original counted nonzero rows of B, but B is never
    # row-reduced here (the intended finishRowReducing was never written),
    # which overestimates the image dimension.  Rank is invariant under
    # the basis change performed by simultaneousReduce, so compute it
    # directly.
    imageDim = numpy.linalg.matrix_rank(B)
    print(imageDim)

    return kernelDim - imageDim
In [12]:
# Boundary matrices of a small simplicial complex (5 vertices, 8 edges,
# 4 triangles, 1 tetrahedron); bd_k maps k-chains to (k-1)-chains.
bd0 = numpy.array([[0, 0, 0, 0, 0]])
bd1 = numpy.array([[-1, -1, -1, -1, 0, 0, 0, 0], [1, 0, 0, 0, -1, -1, 0, 0],
                   [0, 1, 0, 0, 1, 0, -1, -1], [0, 0, 1, 0, 0, 1, 1, 0], [0, 0, 0, 1, 0, 0, 0, 1]])
# Alternative orientation of the 2-boundary, kept for reference:
# bd2 = numpy.array([[1,1,0,0],[-1,0,1,0],[0,-1,-1,0],
#                    [0,0,0,0],[1,0,0,1],[0,1,0,-1],
#                    [0,0,1,1],[0,0,0,0]])
bd2 = numpy.array([[-1, 1, 0, 0], [1, 0, -1, 0], [0, -1, 1, 0],
                   [0, 0, 0, 0], [-1, 0, 0, 1], [0, 1, 0, -1],
                   [0, 0, -1, 1], [0, 0, 0, 0]])
bd3 = numpy.array([[-1], [1], [-1], [1]])

print("0th homology: %d" % bettiNumber(bd0, bd1))
print("1st homology: %d" % bettiNumber(bd1, bd2))
print("2nd homology: %d" % bettiNumber(bd2, bd3))
Generate the matrix used in the thesis. The implementation is quite inefficient and remains to be optimized.
In [ ]:
import numpy
import numpy.linalg
def printMatrix(m):
    """Print matrix m one row per line."""
    for row in m:
        print(str(row))
def rowSwap(n, i, j):
    """Return the n x n elementary matrix that swaps rows i and j.

    Left-multiplying a matrix by the result swaps its rows i and j.
    (This cell redefines the in-place rowSwap from the earlier cell.)
    """
    A = numpy.identity(n)
    A[i][i], A[j][j] = 0, 0
    A[i][j], A[j][i] = 1, 1
    return A
def scaleRow(n, i, c):
    """Return the n x n elementary matrix that scales row i by c.

    Left-multiplying a matrix by the result multiplies its row i by c.
    (This cell redefines the in-place scaleRow from the earlier cell.)
    """
    A = numpy.identity(n)
    A[i][i] = c
    return A
def linComb(n, addTo, scaleRow, scaleAmt):
    """Return the n x n elementary matrix for R_addTo += scaleAmt * R_scaleRow.

    Left-multiplying a matrix by the result adds scaleAmt times row
    `scaleRow` to row `addTo`.  (The parameter name `scaleRow` shadows the
    helper above, but only locally; kept for caller compatibility.)
    """
    A = numpy.identity(n)
    A[addTo][scaleRow] = scaleAmt
    return A
# Example usage of the elementary-matrix helpers (dead code kept for
# reference; the original was a no-op string literal containing a
# Python-2 `print X` that would fail if ever uncommented):
#   X = numpy.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])
#   X = rowSwap(3, 0, 2).dot(X)
#   X = linComb(3, 0, 2, 2).dot(X)
#   print(X)
def rref(matrix):
    """Reduce `matrix` (a non-empty list of row lists) to reduced row
    echelon form in place.

    Returns (matrix, basisChange), where basisChange is the product of
    the elementary matrices applied, so basisChange.dot(original) equals
    the reduced matrix.  Each elementary operation is printed as it is
    applied.  Returns None for an empty input.
    """
    if not matrix:
        return
    numRows = len(matrix)
    numCols = len(matrix[0])
    basisChange = numpy.identity(numRows)

    i, j = 0, 0
    while i < numRows and j < numCols:
        if matrix[i][j] == 0:
            # Search below row i for a nonzero pivot in column j.
            nonzeroRow = i
            while nonzeroRow < numRows and matrix[nonzeroRow][j] == 0:
                nonzeroRow += 1
            if nonzeroRow == numRows:
                j += 1  # column j has no pivot at or below row i
                continue
            temp = matrix[i]
            matrix[i] = matrix[nonzeroRow]
            matrix[nonzeroRow] = temp
            basisChange = rowSwap(numRows, i, nonzeroRow).dot(basisChange)
            print("row swap %d <-> %d" % (i, nonzeroRow))

        # Normalize the pivot row (true division: integer rows become floats).
        pivot = matrix[i][j]
        matrix[i] = [x / pivot for x in matrix[i]]
        basisChange = scaleRow(numRows, i, 1.0 / pivot).dot(basisChange)
        print("scale R %d by %f" % (i, 1.0 / pivot))

        # Eliminate column j from every other row.
        for otherRow in range(0, numRows):
            if otherRow == i:
                continue
            if matrix[otherRow][j] != 0:
                print("row lin comb: R%d = R%d - %G * R%d" % (otherRow, otherRow, matrix[otherRow][j], i))
                # Record the elementary op BEFORE mutating the row, since
                # both read matrix[otherRow][j].
                basisChange = linComb(numRows, otherRow, i, -matrix[otherRow][j]).dot(basisChange)
                matrix[otherRow] = [y - matrix[otherRow][j] * x
                                    for (x, y) in zip(matrix[i], matrix[otherRow])]

        i += 1
        j += 1
    return matrix, basisChange
# Demonstrate the basis-change bookkeeping on the 1-boundary matrix bd1:
# row-reducing bd1.T is equivalent to column-reducing bd1, and the
# accumulated transform (transposed) is the column basis change A.
bd1 = numpy.array([[-1, -1, -1, -1, 0, 0, 0, 0], [1, 0, 0, 0, -1, -1, 0, 0],
                   [0, 1, 0, 0, 1, 0, -1, -1], [0, 0, 1, 0, 0, 1, 1, 0], [0, 0, 0, 1, 0, 0, 0, 1]])
toReduce = bd1.T.tolist()
rrefbd1T, trans = rref(toReduce)
trans = trans.T  # row ops on bd1.T correspond to column ops on bd1
print("A is %r" % trans)
colReduced = bd1.dot(trans)
print("col reduced matrix is %r" % colReduced)

bd2 = numpy.array([[1, 1, 0, 0], [-1, 0, 1, 0], [0, -1, -1, 0],
                   [0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 0, -1],
                   [0, 0, 1, 1], [0, 0, 0, 0]])
# Apply the inverse basis change to bd2 so the pair still composes to zero.
transInv = numpy.linalg.inv(trans)
print("inv(A) is %r" % transInv)
print("inv(A) * bd2 is %r" % transInv.dot(bd2))
print("We still get bd1 * bd2 = 0 after reducing: %r" % colReduced.dot(transInv.dot(bd2)))
To calculate the ranks of interest, we must construct the matrices representing the boundary mappings. To do this we must agree upon an ordering of the simplices, so we simply use the one generated by the data structure — which is why each simplex has an index attribute.
To find the rank of $C_p$, which we have called $n_p$, we simply count the number of p-simplices. So we can add that feature to our SimplicialComplex class easily.
In [ ]: