SpikeSorting.jl was built with online processing in mind, and it is capable of sorting multiple channels simultaneously while taking advantage of multiple cores. How an algorithm scales across channels and cores depends on the algorithm itself, the sampling rate, and the machine it is running on. The simulations below demonstrate performance for various algorithms under some of these conditions.
In [ ]:
addprocs(4)   # add 4 worker processes (IDs 2:5) for the multi-core benchmark
using SpikeSorting, DistributedArrays, PyPlot

# Build banks of sorters for 4 to 256 channels, in steps of 4
s_s1=Array(Array{Sorting,1},64);   # single-core sorters
s_p1=Array(Any,64);                # multi-core sorters
count1=1
for i=4:4:256
    #1 core
    s_s1[count1]=create_multi(DetectPower(),ClusterOSort(),AlignMax(),FeatureTime(),ReductionNone(),ThresholdMean(),i);
    #4 cores, distributed over worker processes 2:5
    s_p1[count1]=create_multi(DetectPower(),ClusterOSort(),AlignMax(),FeatureTime(),ReductionNone(),ThresholdMean(),i,2:5);
    count1+=1
end
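Each call to create_multi assembles a sorting pipeline for the given number of channels from its component methods: power-based detection, OSort clustering, alignment to the waveform maximum, raw time-domain features, no feature-space reduction, and a mean-based threshold. The first set of sorters runs on the main process only; the second set additionally specifies worker processes 2:5, so those channels are sorted in parallel.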
In [ ]:
function timing1(mytimes::Array{Float64,2},s_s::Array{Array{Sorting,1},1},s_p)
    count=1
    for i=4:4:256
        #simulated data: 20000 samples for each of i channels
        v=rand(1:1000,20000,i);

        #single core
        (buf1,nums1)=output_buffer(i);
        cal!(s_s[count],v,buf1,nums1,true)
        cal!(s_s[count],v,buf1,nums1);
        t1 = @elapsed onlinesort!(s_s[count],v,buf1,nums1);
        mytimes[count,1]=t1   #first timed run
        for j=1:20
            t1 = @elapsed onlinesort!(s_s[count],v,buf1,nums1);
            if t1<mytimes[count,1]
                mytimes[count,1]=t1   #keep the fastest run
            end
        end

        #4 cores: share the data with the worker processes
        v2=convert(SharedArray{Int64,2},v);
        (buf2,nums2)=output_buffer(i,true);
        cal!(s_p[count],v2,buf2,nums2,true);
        cal!(s_p[count],v2,buf2,nums2);
        t2 = @elapsed onlinesort!(s_p[count],v2,buf2,nums2);
        mytimes[count,2]=t2   #first timed run
        for j=1:20
            t2 = @elapsed onlinesort!(s_p[count],v2,buf2,nums2);
            if t2<mytimes[count,2]
                mytimes[count,2]=t2   #keep the fastest run
            end
        end
        count+=1
    end
    nothing
end
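timing1 benchmarks both sorter banks on the same simulated data. For each channel count it calibrates the sorters with cal!, then times onlinesort! repeatedly and keeps the fastest of the runs, which limits the influence of garbage collection and other background activity. For the multi-core case the data are converted to a SharedArray so the worker processes can read them without copying.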
In [ ]:
times=ones(Float64,64,2)   #best observed time (s): column 1 = 1 core, column 2 = 4 cores
timing1(times,s_s1,s_p1);
In [ ]:
(fig,myax)=subplots(1,1)
#convert seconds to milliseconds for plotting
plot(4:4:256,times[:,1].*1000,4:4:256,times[:,2].*1000)
myax[:set_xlabel]("Number of Channels")
myax[:set_ylabel]("Time (ms)")
myax[:legend](["1 Core", "4 Core"], loc=2)
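One way to summarize the comparison is the parallel speedup, the ratio of the single-core time to the four-core time at each channel count. A minimal sketch (not part of the original benchmark) computed from the same times matrix:
In [ ]:
#Rough sketch: speedup of the 4-core configuration over the single core;
#values above 1 indicate that the parallel version was faster.
(fig2,ax2)=subplots(1,1)
ax2[:plot](4:4:256,times[:,1]./times[:,2])
ax2[:set_xlabel]("Number of Channels")
ax2[:set_ylabel]("Speedup (1 core / 4 cores)")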
In [ ]: