Skip to content

Commit

Permalink
Implement variance function.
Browse files Browse the repository at this point in the history
  • Loading branch information
bastikr committed Mar 16, 2017
1 parent 14e9316 commit 737f0e0
Show file tree
Hide file tree
Showing 8 changed files with 144 additions and 5 deletions.
2 changes: 2 additions & 0 deletions docs/source/api.rst
Expand Up @@ -92,6 +92,8 @@ Operators
.. jl:autofunction:: operators.jl expect
.. jl:autofunction:: operators.jl variance
.. jl:autofunction:: operators.jl tensor
.. jl:autofunction:: operators.jl embed
Expand Down
2 changes: 1 addition & 1 deletion docs/source/operators.rst
Expand Up @@ -44,7 +44,7 @@ Additionally the following functions are implemented:

* Expectation values:
:jl:func:`expect(::Operator, ::DenseOperator)`

:jl:func:`variance(::Operator, ::DenseOperator)`

* Tensor product:
:jl:func:`tensor(::DenseOperator, ::DenseOperator)`
Expand Down
63 changes: 63 additions & 0 deletions profiling/variance_sparse_dense.jl
@@ -0,0 +1,63 @@
using BenchmarkTools
using QuantumOptics

srand(0)

# Build a random normalized ket on basis `b`.
function randstate(b)
    amplitudes = rand(Complex128, length(b))
    return normalize(Ket(b, amplitudes))
end

# Variance via the squared operator: tr(A²ρ) - tr(Aρ)².
function f1(op::SparseOperator, state::DenseOperator)
    second_moment = expect(op*op, state)
    first_moment = expect(op, state)
    return second_moment - first_moment^2
end

# Variance reusing the intermediate product op*state for both moments.
function f2(op::SparseOperator, state::DenseOperator)
    opstate = op*state
    return expect(op, opstate) - trace(opstate)^2
end

# Hybrid: reuse op*state for the second moment, recompute the first via expect.
function f3(op::SparseOperator, state::DenseOperator)
    opstate = op*state
    return expect(op, opstate) - expect(op, state)^2
end

# Benchmark setup: N-dimensional basis, sparse Hermitian operator, mixed state.
N = 1000
b = GenericBasis(N)
s = 0.001  # sparsity of the random operator
m = SparseOperator(b, sprand(Complex128, N, N, s))
op = m + dagger(m)  # Hermitize so the variance is real
x = randstate(b)
y = randstate(b)
# FIX: the tensor-product operator ⊗ was dropped during text extraction;
# the mixed state is the normalized sum of the two rank-1 projectors.
state = normalize(x ⊗ dagger(x) + y ⊗ dagger(y))

# Sanity check: all three implementations must agree.
println(f1(op, state))
println(f2(op, state))
println(f3(op, state))

# Call f1 repeatedly so @profile collects enough samples.
function run_f1(N::Int, op, state)
    for iteration = 1:N
        f1(op, state)
    end
end

# Call f2 repeatedly so @profile collects enough samples.
function run_f2(N::Int, op, state)
    for iteration = 1:N
        f2(op, state)
    end
end

# Time each implementation twice: the first call includes JIT compilation.
@time f1(op, state)
@time f1(op, state)
@time f2(op, state)
@time f2(op, state)
@time f3(op, state)
@time f3(op, state)

Profile.clear()
# @profile run_f1(100000, op, state)
# @profile run_f2(100000, op, state)

# Statistically robust timings; $-interpolation avoids global-variable overhead.
r1 = @benchmark f1($op, $state)
r2 = @benchmark f2($op, $state)
r3 = @benchmark f3($op, $state)

println(r1)
println(r2)
println(r3)
51 changes: 51 additions & 0 deletions profiling/variance_sparse_ket.jl
@@ -0,0 +1,51 @@
using BenchmarkTools
using QuantumOptics

srand(0)

# Build a random normalized ket on basis `b`.
function randstate(b)
    amplitudes = rand(Complex128, length(b))
    return normalize(Ket(b, amplitudes))
end

# Variance via the squared operator: ⟨ψ|A²|ψ⟩ - ⟨ψ|A|ψ⟩².
function f1(op::SparseOperator, state::Ket)
    second_moment = expect(op^2, state)
    first_moment = expect(op, state)
    return second_moment - first_moment^2
end

# Variance reusing op*state and the bra for both moments.
function f2(op::SparseOperator, state::Ket)
    opstate = op*state
    bra = dagger(state)
    return bra*op*opstate - (bra*opstate)^2
end

# Benchmark setup: N-dimensional basis, random sparse operator, random ket.
N = 1000
b = GenericBasis(N)
s = 0.1  # sparsity of the random operator
op = SparseOperator(b, sprand(Complex128, N, N, s))
state = randstate(b)

# Sanity check: both implementations must agree.
println(f1(op, state))
println(f2(op, state))

# Call f1 repeatedly so @profile collects enough samples.
function run_f1(N::Int, op, state)
    for iteration = 1:N
        f1(op, state)
    end
end

# Call f2 repeatedly so @profile collects enough samples.
function run_f2(N::Int, op, state)
    for iteration = 1:N
        f2(op, state)
    end
end

# Time each implementation twice: the first call includes JIT compilation.
@time f1(op, state)
@time f1(op, state)
@time f2(op, state)
@time f2(op, state)

Profile.clear()
# @profile run_f1(100000, op, state)
# @profile run_f2(100000, op, state)

# Statistically robust timings; $-interpolation avoids global-variable overhead.
r2 = @benchmark f2($op, $state)
r1 = @benchmark f1($op, $state)

println(r1)
println(r2)
2 changes: 1 addition & 1 deletion src/QuantumOptics.jl
Expand Up @@ -6,7 +6,7 @@ export bases, Basis, GenericBasis, CompositeBasis,
tensor, , permutesystems,
states, StateVector, Bra, Ket, basisstate,
dagger, normalize, normalize!,
operators, Operator, expect, identityoperator, ptrace, embed,
operators, Operator, expect, variance, identityoperator, ptrace, embed,
operators_dense, DenseOperator, projector,
operators_sparse, SparseOperator, diagonaloperator,
operators_lazysum, LazySum,
Expand Down
14 changes: 13 additions & 1 deletion src/operators.jl
Expand Up @@ -8,7 +8,7 @@ using ..sortedindices, ..bases, ..states

export Operator,
dagger, identityoperator,
trace, ptrace, normalize, normalize!, expect,
trace, ptrace, normalize, normalize!, expect, variance,
tensor, permutesystems, embed


Expand Down Expand Up @@ -80,6 +80,18 @@ expect(op::Operator, state::Ket) = dagger(state)*(op*state)
expect(op::Operator, state::Operator) = trace(op*state)
expect(op::Operator, states::Vector) = [expect(op, state) for state=states]

"""
Variance of the given operator for the specified state(s).

For a ket the variance is ⟨ψ|op²|ψ⟩ - ⟨ψ|op|ψ⟩²; for a (density)
operator it is tr(op²·state) - tr(op·state)². A vector of states
yields the elementwise vector of variances.
"""
function variance(op::Operator, state::Ket)
    opstate = op*state
    bra = dagger(state)
    return bra*op*opstate - (bra*opstate)^2
end
function variance(op::Operator, state::Operator)
    return expect(op*op, state) - expect(op, state)^2
end
variance(op::Operator, states::Vector) = [variance(op, s) for s=states]

"""
Tensor product of operators.
Expand Down
10 changes: 8 additions & 2 deletions test/test_fock.jl
Expand Up @@ -56,10 +56,16 @@ b2 = FockBasis(5)
b1 = FockBasis(100)
b2 = FockBasis(2, 5)
alpha = complex(3.)
a = destroy(b1)
n = number(b1)
psi = coherentstate(b1, alpha)
# FIX: the tensor-product operator ⊗ was dropped during text extraction;
# rho is the pure-state density operator |ψ⟩⟨ψ|.
rho = psi ⊗ dagger(psi)

@test 1e-14 > norm(expect(a, psi) - alpha)
@test 1e-14 > norm(expect(a, rho) - alpha)
@test 1e-14 > norm(coherentstate(b1, alpha).data[3:6] - coherentstate(b2, alpha).data)

# Coherent-state photon-number variance equals |α|² (Poissonian statistics).
@test 1e-13 > abs(variance(n, psi) - abs(alpha)^2)
@test 1e-13 > abs(variance(n, rho) - abs(alpha)^2)

# Test qfunc
b = FockBasis(50)
Expand Down
5 changes: 5 additions & 0 deletions test/test_particle.jl
Expand Up @@ -44,6 +44,11 @@ x_bp = positionoperator(basis_momentum)
# FIX: the ≈ operator was dropped during text extraction.
@test p0 ≈ expect(p_bp, psi0_bp)
@test 0.1 > abs(x0 - expect(x_bp, psi0_bp))

# Gaussian wave packet: Var(x) = σ²/2 and Var(p) = 1/(2σ²) in both bases.
@test 1e-13 > abs(variance(x_bx, psi0_bx) - sigma^2/2)
@test 1e-13 > abs(variance(x_bp, psi0_bp) - sigma^2/2)
@test 1e-13 > abs(variance(p_bx, psi0_bx) - 1/(2*sigma^2))
@test 1e-13 > abs(variance(p_bp, psi0_bp) - 1/(2*sigma^2))

# Test potentialoperator
V(x) = x^2
V_bx = potentialoperator(basis_position, V)
Expand Down

0 comments on commit 737f0e0

Please sign in to comment.