Merge pull request #797 from ajweiss/ajw_lpc_burg
Add LPC coefficient estimation via Burg's method
bmcfee committed Mar 26, 2019
2 parents 1746065 + cbeda90 commit d3beb5e
Showing 4 changed files with 238 additions and 3 deletions.
1 change: 1 addition & 0 deletions librosa/core/__init__.py
@@ -14,6 +14,7 @@
resample
get_duration
autocorrelate
lpc
zero_crossings
clicks
tone
163 changes: 160 additions & 3 deletions librosa/core/audio.py
@@ -11,14 +11,15 @@
import scipy.signal
import resampy

from numba import jit
from .fft import get_fftlib
from .time_frequency import frames_to_samples, time_to_samples
from .._cache import cache
from .. import util
from ..util.exceptions import ParameterError

__all__ = ['load', 'to_mono', 'resample', 'get_duration',
'autocorrelate', 'zero_crossings', 'clicks', 'tone', 'chirp']
__all__ = ['load', 'to_mono', 'resample', 'get_duration', 'autocorrelate',
'lpc', 'zero_crossings', 'clicks', 'tone', 'chirp']

# Resampling bandwidths as percentage of Nyquist
BW_BEST = resampy.filters.get_filter('kaiser_best')[2]
@@ -441,7 +442,6 @@ def get_duration(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,

return float(n_samples) / sr


@cache(level=20)
def autocorrelate(y, max_size=None, axis=-1):
"""Bounded auto-correlation
@@ -514,6 +514,163 @@ def autocorrelate(y, max_size=None, axis=-1):
return autocorr


def lpc(y, order):
"""Linear Prediction Coefficients via Burg's method
This function applies Burg's method to estimate coefficients of a linear
filter on `y` of order `order`. Burg's method is an extension to the
Yule-Walker approach, which are both sometimes referred to as LPC parameter
estimation by autocorrelation.
It follows the description and implementation approach described in the
introduction in [1]_. N.B. This paper describes a different method, which
is not implemented here, but has been chosen for its clear explanation of
Burg's technique in its introduction.
.. [1] Larry Marple
A New Autoregressive Spectrum Analysis Algorithm
IEEE Transactions on Accoustics, Speech, and Signal Processing
vol 28, no. 4, 1980
Parameters
----------
y : np.ndarray
Time series to fit
order : int > 0
Order of the linear filter

Returns
-------
a : np.ndarray of length order + 1
LP prediction error coefficients, i.e. filter denominator polynomial

Raises
------
ParameterError
- If y is not valid audio as per `util.valid_audio`
- If order < 1 or not integer
FloatingPointError
- If y is ill-conditioned

See also
--------
scipy.signal.lfilter

Examples
--------
Compute LP coefficients of y at order 16 on entire series

>>> y, sr = librosa.load(librosa.util.example_audio_file(), offset=30,
... duration=10)
>>> librosa.lpc(y, 16)

Compute LP coefficients, and plot LP estimate of original series

>>> import matplotlib.pyplot as plt
>>> import scipy
>>> y, sr = librosa.load(librosa.util.example_audio_file(), offset=30,
... duration=0.020)
>>> a = librosa.lpc(y, 2)
>>> y_hat = scipy.signal.lfilter(np.hstack([[0], -1 * a[1:]]), [1], y)
>>> plt.figure()
>>> plt.plot(y)
>>> plt.plot(y_hat)
>>> plt.legend(['y', 'y_hat'])
>>> plt.title('LP Model Forward Prediction')
"""
if not isinstance(order, int) or order < 1:
raise ParameterError("order must be an integer > 0")

util.valid_audio(y, mono=True)

return __lpc(y, order)


@jit(nopython=True)
def __lpc(y, order):
# This implementation follows the description of Burg's algorithm given in
# section III of Marple's paper referenced in the docstring.
#
# We use the Levinson-Durbin recursion to compute AR coefficients for each
# increasing model order by using those from the last. We maintain two
# arrays and then flip them each time we increase the model order so that
# we may use all the coefficients from the previous order while we compute
# those for the new one. These two arrays hold ar_coeffs for order M and
# order M-1. (Corresponding to a_{M,k} and a_{M-1,k} in eqn 5)
ar_coeffs = np.zeros(order+1, dtype=y.dtype)
ar_coeffs[0] = 1
ar_coeffs_prev = np.zeros(order+1, dtype=y.dtype)
ar_coeffs_prev[0] = 1

# These two arrays hold the forward and backward prediction error. They
# correspond to f_{M-1,k} and b_{M-1,k} in eqns 10, 11, 13 and 14 of
# Marple. First they are used to compute the reflection coefficient at
# order M from M-1 then are re-used as f_{M,k} and b_{M,k} for each
# iteration of the below loop
fwd_pred_error = y[1:]
bwd_pred_error = y[:-1]

# DEN_{M} from eqn 16 of Marple.
den = np.dot(fwd_pred_error, fwd_pred_error) \
+ np.dot(bwd_pred_error, bwd_pred_error)

for i in range(order):
if den <= 0:
raise FloatingPointError('numerical error, input ill-conditioned?')

# Eqn 15 of Marple, with fwd_pred_error and bwd_pred_error
# corresponding to f_{M-1,k+1} and b_{M-1,k}, and the result as a_{M,M}
reflect_coeff = -2 * np.dot(bwd_pred_error, fwd_pred_error) / den

# Now we use the reflection coefficient and the AR coefficients from
# the last model order to compute all of the AR coefficients for the
# current one. This is the Levinson-Durbin recursion described in
# eqn 5.
# Note 1: We don't have to care about complex conjugates as our signals
# are all real-valued
# Note 2: j counts 1..i+1, and i-j+1 counts i..0
# Note 3: The first element of ar_coeffs_prev is always 1, so the final
# iteration (j == i+1) copies the reflection coefficient into the last
# slot of the new AR coefficient array, after the preceding coefficients
ar_coeffs_prev, ar_coeffs = ar_coeffs, ar_coeffs_prev
for j in range(1, i+2):
ar_coeffs[j] = ar_coeffs_prev[j] + reflect_coeff*ar_coeffs_prev[i - j + 1]

# Update the forward and backward prediction errors corresponding to
# eqns 13 and 14. We start with f_{M-1,k+1} and b_{M-1,k} and use them
# to compute f_{M,k} and b_{M,k}
fwd_pred_error_tmp = fwd_pred_error
fwd_pred_error = fwd_pred_error + reflect_coeff*bwd_pred_error
bwd_pred_error = bwd_pred_error + reflect_coeff*fwd_pred_error_tmp

# SNIP - we are now done with order M and advance. M-1 <- M

# Compute DEN_{M} using the recursion from eqn 17.
#
# reflect_coeff = a_{M-1,M-1} (we have advanced M)
# den = DEN_{M-1} (rhs)
# bwd_pred_error = b_{M-1,N-M+1} (we have advanced M)
# fwd_pred_error = f_{M-1,k} (we have advanced M)
# den <- DEN_{M} (lhs)
#
q = 1 - reflect_coeff**2
den = q*den - bwd_pred_error[-1]**2 - fwd_pred_error[0]**2

# Shift up forward error.
#
# fwd_pred_error <- f_{M-1,k+1}
# bwd_pred_error <- b_{M-1,k}
#
# N.B. We do this after computing the denominator using eqn 17 but
# before using it in the numerator in eqn 15.
fwd_pred_error = fwd_pred_error[1:]
bwd_pred_error = bwd_pred_error[:-1]

return ar_coeffs


@cache(level=20)
def zero_crossings(y, threshold=1e-10, ref_magnitude=None, pad=True,
zero_pos=True, axis=-1):
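The comments in `__lpc` cite Marple's equations by number without restating them. In the notation of the code above (`reflect_coeff` = k_M, `ar_coeffs` = a_{M,j}, and `fwd_pred_error`/`bwd_pred_error` = the forward/backward prediction errors f and b), the per-order update implemented by the loop can be summarized as:

k_M = \frac{-2 \sum_k b_{M-1,k}\, f_{M-1,k+1}}{\sum_k f_{M-1,k+1}^2 + \sum_k b_{M-1,k}^2}  % reflection coefficient, eqns 15-16
a_{M,j} = a_{M-1,j} + k_M\, a_{M-1,M-j}, \quad j = 1,\dots,M, \qquad a_{M,0} = 1,\ a_{M,M} = k_M  % Levinson-Durbin step, eqn 5
f_{M,k} = f_{M-1,k+1} + k_M\, b_{M-1,k}, \qquad b_{M,k} = b_{M-1,k} + k_M\, f_{M-1,k+1}  % error updates, eqns 13-14
\mathrm{DEN}_{M+1} = (1 - k_M^2)\,\mathrm{DEN}_M - f_{M,\mathrm{first}}^2 - b_{M,\mathrm{last}}^2  % denominator recursion, eqn 17

The final shift of the error arrays discards exactly the two samples subtracted in the DEN recursion, which is why the shift happens only after the eqn 17 update.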
47 changes: 47 additions & 0 deletions tests/makeTestData.m
@@ -61,6 +61,9 @@ function testData(source_path, output_path)
display('beat');
testBeat(output_path);

display('lpcburg');
testLPCBurg(output_path);

%% Done!
display('Done.');
end
@@ -403,6 +406,50 @@ function testBeat(output_path)

end

function testLPCBurg(output_path)
rng(1);

output_counter = 1;
for m=3:20
signal = {};
est_coeffs = {};
order = {};
true_coeffs = {};

i=1;
for l=pow2(6:16)
if l < m*2
continue
end

% Generate random, but stable filters
while 1
% 0.25... Let's not play the stable filter
% lottery forever
coeffs = rand(1, m-1) .* 0.25;
% Keep away from the edge
if all(abs(roots([1 coeffs])) < 1-10*eps)
break
end
end

% Filter some noise with them and store them for testing
noise = randn(l,1);
signal{i} = filter(1, [1 coeffs], noise);
est_coeffs{i} = arburg(signal{i}, m);
order{i} = m;
true_coeffs{i} = [1 coeffs];

i=i+1;
end

filename = sprintf('%s/core-lpcburg-%03d.mat', output_path, output_counter);
display([' `-- saving ', filename]);
save(filename, 'signal', 'est_coeffs', 'order', 'true_coeffs');
output_counter = output_counter + 1;
end
end

function testChromafb(output_path)

% Test three sample rates
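For readers without MATLAB, a rough Python analogue of the fixture generation in testLPCBurg is sketched below. This is a hypothetical stand-in (there is no scipy equivalent of arburg, so it checks librosa.lpc against the true filter coefficients rather than against MATLAB's reference estimates) and assumes a librosa build that includes this PR.

import numpy as np
import scipy.signal
import librosa  # assumes a build that includes this PR

rng = np.random.RandomState(1)

for m in range(3, 21):                  # model orders, as in the MATLAB loop
    for l in 2 ** np.arange(6, 17):     # signal lengths 64 .. 65536
        if l < 2 * m:
            continue

        # Generate random, but stable filters; the 0.25 scaling keeps the
        # roots comfortably inside the unit circle most of the time.
        while True:
            coeffs = rng.rand(m - 1) * 0.25
            a_true = np.concatenate(([1.0], coeffs))
            if np.all(np.abs(np.roots(a_true)) < 1 - 1e-8):
                break

        # Filter white noise with the chosen model, re-estimate it at the
        # true order, and report the worst coefficient error.
        y = scipy.signal.lfilter([1.0], a_true, rng.randn(l))
        a_est = librosa.lpc(y, m - 1)
        print(m, l, np.max(np.abs(a_est - a_true)))  # shrinks as l grows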
30 changes: 30 additions & 0 deletions tests/test_core.py
@@ -610,6 +610,36 @@ def __test(y, truth, max_size, axis):
yield __test, y, truth[axis], max_size, axis


def test_lpc_regress():

def __test(signal, order, true_coeffs, est_coeffs):
test_coeffs = librosa.lpc(signal, order)
assert np.allclose(test_coeffs, est_coeffs)

for infile in files(os.path.join('tests', 'data', 'core-lpcburg-*.mat')):
test_data = scipy.io.loadmat(infile, squeeze_me=True)

for i in range(len(test_data['signal'])):
yield (__test,
test_data['signal'][i],
test_data['order'][i],
test_data['true_coeffs'][i],
test_data['est_coeffs'][i])


def test_lpc_simple():
srand()

n = 5000
est_a = np.zeros((n, 6))
truth_a = [1, 0.5, 0.4, 0.3, 0.2, 0.1]
for i in range(n):
noise = np.random.randn(1000)
filtered = scipy.signal.lfilter([1], truth_a, noise)
est_a[i, :] = librosa.lpc(filtered, 5)
assert np.allclose(truth_a, np.mean(est_a, axis=0), rtol=0, atol=1e-3)


def test_to_mono():

def __test(filename, mono):
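The tests above compare against MATLAB's arburg and against a known all-pole filter. The docstring's relationship to the Yule-Walker (autocorrelation) approach can also be checked directly; below is a minimal sketch, where lpc_autocorr is a hypothetical helper written here only for comparison (not part of librosa) and the snippet assumes a librosa build that includes this PR.

import numpy as np
import scipy.linalg
import scipy.signal
import librosa  # assumes a build that includes this PR


def lpc_autocorr(y, order):
    # Autocorrelation (Yule-Walker) LPC: solve the Toeplitz normal
    # equations R a = -r for the predictor coefficients.
    r = librosa.autocorrelate(y, max_size=order + 1)
    a = scipy.linalg.solve_toeplitz((r[:-1], r[:-1]), -r[1:])
    return np.concatenate(([1.0], a))


# Synthesize a long, stationary AR(2) signal; on such data the two
# estimators should agree closely, and both should land near a_true.
rng = np.random.RandomState(0)
a_true = np.array([1.0, -0.9, 0.4])
y = scipy.signal.lfilter([1.0], a_true, rng.randn(100000))

print(librosa.lpc(y, 2))   # Burg, as added in this PR
print(lpc_autocorr(y, 2))  # Yule-Walker reference

On short or non-stationary frames the two methods diverge more noticeably, which is one of the usual motivations for preferring Burg's method.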
