## How does Julia do special matrices fast?
# You make it go fast; Julia doesn't do it for you.
module ClassExample

import Base: getindex, size

# A lazy circulant matrix: we store only the first column and compute every
# entry on demand in getindex.
struct CirculantMatrix{T} <: AbstractArray{T,2}
    data::Vector{T}
end

function getindex(A::CirculantMatrix, i::Int, j::Int)
    n = length(A.data)
    # Entry (i, j) depends only on (i - j) mod n, so each wrapped diagonal is constant.
    return A.data[mod1(i-j+1, n)] # David doesn't understand why the AI got this right
    # but it does seem to be correct....
end

function size(A::CirculantMatrix)
    n = length(A.data)
    return (n, n)
end

end
ClassExample.CirculantMatrix([1,2,3,4])
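## A quick sanity check on the getindex formula (a sketch added for clarity,
# not part of the original lecture): materialize the lazy matrix and compare
# it against a circulant built by explicitly rotating the first column.
c = [1, 2, 3, 4]
C = ClassExample.CirculantMatrix(c)
dense = Matrix(C)  # the AbstractArray fallback fills this entry-by-entry via getindex
byhand = hcat((circshift(c, k) for k in 0:length(c)-1)...)  # column k+1 is c shifted down k times
dense == byhand    # expected: true; each column is a cyclic shift of the first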
using LinearAlgebra   # eigen comes from the LinearAlgebra standard library
eigen(ClassExample.CirculantMatrix([1,2,3,4])) # This is a little bit of a cheat, but it works.
## At the moment, this isn't doing anything special: eigen converts the
# circulant to a dense matrix and then computes its eigenvalues.
# But we could do something special here.
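## One way to "do something special" (a sketch, not from the lecture): the
# eigenvalues of a circulant matrix are the discrete Fourier transform of its
# first column, so eigvals can skip the dense matrix entirely. The O(n^2) DFT
# loop below just avoids adding an FFT dependency; with FFTW it would be
# O(n log n).
using LinearAlgebra

function LinearAlgebra.eigvals(A::ClassExample.CirculantMatrix)
    c = A.data
    n = length(c)
    ω = exp(2π * im / n)   # primitive n-th root of unity
    return [sum(c[m+1] * ω^(m*k) for m in 0:n-1) for k in 0:n-1]
end

eigvals(ClassExample.CirculantMatrix([1, 2, 3, 4]))          # DFT route: 10, -2-2im, -2, -2+2im
eigvals(Matrix(ClassExample.CirculantMatrix([1, 2, 3, 4])))  # dense route: same values, possibly reordered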
## Here's an example where a special matrix type really is faster.
using LinearAlgebra
n = 1000
A = SymTridiagonal(randn(n), randn(n-1))
1000×1000 SymTridiagonal{Float64, Vector{Float64}} (entries omitted)
## let's see how long computing the eigenvalues takes...
using BenchmarkTools
@btime eigvals($A);
12.423 ms (4 allocations: 31.94 KiB)
## Let's see how long a general 1000x1000 matrix takes...
B = Matrix(A) # Convert to dense
@btime eigvals($B);
44.352 ms (11 allocations: 7.99 MiB)
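## Quick check (a sketch, not part of the original timing run): both routes
# should produce the same spectrum up to floating-point error; sort and real
# are just defensive, in case the dense route returns an unsorted or complex vector.
sort(eigvals(A)) ≈ sort(real.(eigvals(B)))   # expected: true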
##
n = 100000
A = SymTridiagonal(randn(n), randn(n-1))
@time lams = eigvals(A);
111.418769 seconds (7 allocations: 3.052 MiB)
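## For scale (back-of-the-envelope arithmetic, not from the lecture): a dense
# 100000×100000 Float64 matrix would need roughly 75 GiB of memory, so the
# dense route isn't even an option here, while SymTridiagonal stores just two
# vectors.
100_000^2 * 8 / 2^30           # ≈ 74.5 GiB for a dense matrix
(100_000 + 99_999) * 8 / 2^20  # ≈ 1.5 MiB for the diagonal and off-diagonal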
##
@which eigvals(A)
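## @which reports the method that dispatch selected (the exact file and line
# depend on your Julia version): for the SymTridiagonal A it should point at a
# specialized method inside LinearAlgebra's tridiagonal code, whereas for the
# dense B below it points at the generic dense path. That dispatch is the whole
# trick: the speed comes from a method written for the special structure.
@which eigvals(B)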