diff --git a/deltasigma/_PlotExampleSpectrum.py b/deltasigma/_PlotExampleSpectrum.py
index f6dfd49..ae7202f 100644
--- a/deltasigma/_PlotExampleSpectrum.py
+++ b/deltasigma/_PlotExampleSpectrum.py
@@ -99,11 +99,10 @@ def PlotExampleSpectrum(ntf, M=1, osr=64, f0=0, quadrature=False):
     NBW = 1.5/N
     spec0 = fft(v * window)/(M*N/4)
     if not quadrature:
-        freq = np.linspace(0, 0.5, N/2 + 1)
-        plt.plot(freq, dbv(spec0[:N/2 + 1]), 'c', linewidth=1)
-        plt.hold(True)
+        freq = np.linspace(0, 0.5, N//2 + 1)
+        plt.plot(freq, dbv(spec0[:N//2 + 1]), 'c', linewidth=1)
         spec_smoothed = circ_smooth(np.abs(spec0)**2., 16)
-        plt.plot(freq, dbp(spec_smoothed[:N/2 + 1]), 'b', linewidth=3)
+        plt.plot(freq, dbp(spec_smoothed[:N//2 + 1]), 'b', linewidth=3)
         Snn = np.abs(evalTF(ntf, np.exp(2j*np.pi*freq)))**2 * 2/12*(delta/M)**2
         plt.plot(freq, dbp(Snn*NBW), 'm', linewidth=1)
         snr = calculateSNR(spec0[f1_bin:f2_bin + 1], fin - f1_bin)
@@ -123,15 +122,14 @@ def PlotExampleSpectrum(ntf, M=1, osr=64, f0=0, quadrature=False):
         freq = np.linspace(-0.5, 0.5, N + 1)
         freq = freq[:-1]
         plt.plot(freq, dbv(spec0), 'c', linewidth=1)
-        plt.hold(True)
         spec_smoothed = circ_smooth(abs(spec0)**2, 16)
         plt.plot(freq, dbp(spec_smoothed), 'b', linewidth=3)
         Snn = abs(evalTF(ntf, np.exp(2j * np.pi * freq))) ** 2 * 2 / 12 * (delta / M) ** 2
         plt.plot(freq, dbp(Snn*NBW), 'm', linewidth=1)
-        snr = calculateSNR(spec0[N/2 + f1_bin:N/2 + f2_bin + 1], fin - f1_bin)
+        snr = calculateSNR(spec0[N//2 + f1_bin:N//2 + f2_bin + 1], fin - f1_bin)
         msg = 'SQNR = %.1fdB\n @ A = %.1fdBFS & osr = %.0f' % \
-              (snr, dbv(spec0[N/2 + fin]), osr)
-        if f0 >= 0:
+              (snr, dbv(spec0[N//2 + fin]), osr)
+        if f0 >= 0:
             plt.text(f0 - 0.05, - 15, msg, horizontalalignment='right',
                      verticalalignment='bottom')
         else:
diff --git a/deltasigma/__init__.py b/deltasigma/__init__.py
index 0ce1c74..4f0727f 100644
--- a/deltasigma/__init__.py
+++ b/deltasigma/__init__.py
@@ -915,7 +915,9 @@
 # if not os.system('python -c "import matplotlib.pyplot as plt;plt.figure()"')
 import matplotlib
 import os
-if not ('DISPLAY' in os.environ or os.environ.get('READTHEDOCS', None)):
+if not ('DISPLAY' in os.environ
+        or os.name == 'nt'
+        or os.environ.get('READTHEDOCS', None)):
     matplotlib.use('Agg')

 from ._DocumentNTF import DocumentNTF
diff --git a/deltasigma/_bplogsmooth.py b/deltasigma/_bplogsmooth.py
index a99b5f7..c01ef13 100644
--- a/deltasigma/_bplogsmooth.py
+++ b/deltasigma/_bplogsmooth.py
@@ -72,8 +72,8 @@ def bplogsmooth(X, tbin, f0):
     m = m - int(n)
     lsb1 = np.concatenate((lsb2[1:] + 1, np.ones((1,))))

-    startbin = np.concatenate((lsb1[::-1], usb1)) - 1
-    stopbin = np.concatenate((lsb2[::-1], usb2)) - 1
+    startbin = np.concatenate((lsb1[::-1], usb1)).astype(np.int) - 1
+    stopbin = np.concatenate((lsb2[::-1], usb2)).astype(np.int) - 1
     f = ((startbin + stopbin)/2.)/N - f0

     p = np.zeros(f.shape)
diff --git a/deltasigma/_ds_optzeros.py b/deltasigma/_ds_optzeros.py
index 1f93099..2d78e1d 100644
--- a/deltasigma/_ds_optzeros.py
+++ b/deltasigma/_ds_optzeros.py
@@ -78,7 +78,7 @@ def ds_optzeros(n, opt=1):
     """
     opt = int(opt)
     if opt == 0:
-        optZeros = np.zeros((np.ceil(n/2.), ))
+        optZeros = np.zeros((int(np.ceil(n/2.)), ))
     else:
         optZeros = _oznopt[n][opt]

diff --git a/deltasigma/_ds_synNTFobj1.py b/deltasigma/_ds_synNTFobj1.py
index 9a6065b..7b6278f 100644
--- a/deltasigma/_ds_synNTFobj1.py
+++ b/deltasigma/_ds_synNTFobj1.py
@@ -16,6 +16,8 @@
 """Module providing the ds_synNTFobj1() function
 """

+from __future__ import division
+
 import numpy as np

 from ._db import db
@@ -37,7 +39,7 @@ def ds_synNTFobj1(x, p, osr, f0):
     z = np.exp(2j*np.pi*(f0 + 0.5/osr*x))
     z = carray(z)
     if f0 > 0:
-        z = padt(z, p.shape[0]/2., np.exp(2j*np.pi*f0))
+        z = padt(z, p.shape[0]//2, np.exp(2j*np.pi*f0))
     z = np.hstack((z, np.conj(z)))
     z = z[:]

diff --git a/deltasigma/_mapABCD.py b/deltasigma/_mapABCD.py
index 64e893b..fca6552 100644
--- a/deltasigma/_mapABCD.py
+++ b/deltasigma/_mapABCD.py
@@ -112,7 +112,7 @@ def mapABCD(ABCD, form='CRFB'):
         for i in range(1, order, 2):
             b[i] = b[i] - c[i]*b[i - 1]
             if odd:
-                b[i] = b[i] + g[(i - 1)/2]*b[i + 1]
+                b[i] = b[i] + g[(i - 1)//2]*b[i + 1]
         yscale = ABCD[order + 1, order]
         a = a*yscale
         b[-1] = b[-1]*yscale
diff --git a/deltasigma/_mapCtoD.py b/deltasigma/_mapCtoD.py
index 206e6b2..459403f 100644
--- a/deltasigma/_mapCtoD.py
+++ b/deltasigma/_mapCtoD.py
@@ -174,8 +174,8 @@ def mapCtoD(sys_c, t=(0, 1), f0=0.):
         if t1 == 0 and t2 == 1 and D2 == 0: # No fancy stuff necessary
             Bp = Bp + padb(B2, npp)
         else:
-            n1 = np.floor(t1)
-            n2 = np.ceil(t2) - n1 - 1
+            n1 = int(np.floor(t1))
+            n2 = int(np.ceil(t2)) - n1 - 1
             t1 = t1 - n1
             t2 = t2 - n2 - n1
             if t2 == 1 and D2 != 0:
diff --git a/deltasigma/_peakSNR.py b/deltasigma/_peakSNR.py
index e69d916..4b1feae 100644
--- a/deltasigma/_peakSNR.py
+++ b/deltasigma/_peakSNR.py
@@ -89,8 +89,5 @@ def peakSNR(snr, amp):
     if _debug:
         import pylab as plt
         pred = np.dot(A, ab)
-        hold = plt.ishold()
-        plt.hold(True)
         plt.plot(dbv(amp), dbv(pred), '-', color='b')
-        plt.hold(hold)
     return peak_snr, peak_amp
diff --git a/deltasigma/_plotPZ.py b/deltasigma/_plotPZ.py
index 5f0376f..e745951 100644
--- a/deltasigma/_plotPZ.py
+++ b/deltasigma/_plotPZ.py
@@ -72,8 +72,8 @@ def plotPZ(H, color='b', markersize=5, showlist=False):
     p = np.real_if_close(np.round(p, 5))
     z = np.real_if_close(np.round(z, 5))

-    pole_fmt = {'marker': 'x', 'markersize': markersize}
-    zero_fmt = {'marker': 'o', 'markersize': markersize}
+    pole_fmt = {'marker': 'x', 'markersize': markersize, 'mew': markersize}
+    zero_fmt = {'marker': 'o', 'markersize': markersize, 'mew': markersize}

     if isinstance(color, list) or isinstance(color, tuple):
         pole_fmt['color'] = color[0]
@@ -82,12 +82,10 @@ def plotPZ(H, color='b', markersize=5, showlist=False):
         pole_fmt['color'] = color
         zero_fmt['color'] = color

-    hold_status = plt.ishold()
     plt.grid(True)

     # Plot x and o for poles and zeros, respectively
     plt.plot(p.real, p.imag, linestyle='None', **pole_fmt)
-    plt.hold(True)
     if len(z) > 0:
         plt.plot(z.real, z.imag, linestyle='None', **zero_fmt)

@@ -128,6 +126,3 @@ def plotPZ(H, color='b', markersize=5, showlist=False):
     # plt.axes().set_aspect('equal', 'datalim')
     plt.ylabel('Imag')
     plt.xlabel('Real')
-
-    if not hold_status:
-        plt.hold(False)
diff --git a/deltasigma/_predictSNR.py b/deltasigma/_predictSNR.py
index 8786fa0..c0c6889 100644
--- a/deltasigma/_predictSNR.py
+++ b/deltasigma/_predictSNR.py
@@ -243,12 +243,12 @@ def powerGain(num, den, Nimp=100):
     unstable = False
     _, (imp, ) = dimpulse((num, den, 1), t=np.linspace(0, Nimp, Nimp))
     if np.sum(abs(imp[Nimp - 11:Nimp])) < 1e-08 and Nimp > 50:
-        Nimp = np.round(Nimp/1.3)
+        Nimp = int(np.round(Nimp/1.3))
     else:
         while np.sum(abs(imp[Nimp - 11:Nimp])) > 1e-06:
             Nimp = Nimp*2
             _, (imp, ) = dimpulse((num, den, 1), t=np.linspace(0, Nimp, Nimp))
-            if np.sum(abs(imp[Nimp - 11:Nimp])) >= 50 or Nimp >= 10000.0:
+            if np.sum(abs(imp[Nimp - 11:Nimp])) >= 50 or Nimp >= 10000:
                 unstable = True
                 break

diff --git a/deltasigma/_pulse.py b/deltasigma/_pulse.py
index d441123..b5bf6c8 100644
--- a/deltasigma/_pulse.py
+++ b/deltasigma/_pulse.py
@@ -120,10 +120,11 @@ def pulse(S, tp=(0., 1.), dt=1., tfinal=10., nosum=False):
     nis = int(ni/ndac)
     # notice len(S[0]) is the number of outputs for us

-    if not nosum: # Sum the responses due to each input set
-        y = np.zeros((np.ceil(tfinal/float(dt)) + 1, len(S[0]), nis))
+    tceil = int(np.ceil(tfinal/float(dt))) + 1
+    if not nosum: # Sum the responses due to each input set
+        y = np.zeros((tceil, len(S[0]), nis))
     else:
-        y = np.zeros((np.ceil(tfinal/float(dt)) + 1, len(S[0]), ni))
+        y = np.zeros((tceil, len(S[0]), ni))

     for i in range(ndac):
         n1 = int(np.round(tp[i, 0]/delta_t, 0))
diff --git a/deltasigma/_realizeNTF_ct.py b/deltasigma/_realizeNTF_ct.py
index 4d386f5..9b2cd20 100644
--- a/deltasigma/_realizeNTF_ct.py
+++ b/deltasigma/_realizeNTF_ct.py
@@ -122,7 +122,7 @@ def realizeNTF_ct(ntf, form='FB', tdac=(0, 1), ordering=None, bp=None,
     ntf_z = carray(ntf_z)
     ntf_p = carray(ntf_p)
     order = max(ntf_p.shape)
-    order2 = int(np.floor(order/2.))
+    order2 = order//2
     odd = order - 2*order2
     # compensate for limited accuracy of zero calculation
     ntf_z[np.abs(ntf_z - 1) < eps**(1./(1. + order))] = 1.
@@ -154,7 +154,7 @@ def realizeNTF_ct(ntf, form='FB', tdac=(0, 1), ordering=None, bp=None,
         bp = np.zeros((order2,))
     if not multi_timing:
         # Need direct terms for every interval of memory in the DAC
-        n_direct = np.ceil(tdac[1]) - 1
+        n_direct = int(np.ceil(tdac[1])) - 1
         if tdac[0] > 0 and tdac[0] < 1 and tdac[1] > 1 and tdac[1] < 2:
             n_extra = n_direct - 1 # tdac pulse spans a sample point
         else:
@@ -234,7 +234,7 @@ def realizeNTF_ct(ntf, form='FB', tdac=(0, 1), ordering=None, bp=None,
     else:
         raise ValueError('Sorry, no code for form "%s".', form)

-    n_imp = np.ceil(2*order + np.max(tdac2[:, 1]) + 1)
+    n_imp = int(np.ceil(2*order + np.max(tdac2[:, 1]) + 1))
     if method == 'LOOP':
         # Sample the L1 impulse response
         y = impL1(ntf, n_imp)
diff --git a/deltasigma/_scaleABCD.py b/deltasigma/_scaleABCD.py
index af181e3..b397eb4 100644
--- a/deltasigma/_scaleABCD.py
+++ b/deltasigma/_scaleABCD.py
@@ -26,7 +26,7 @@
 from ._simulateQDSM import simulateQDSM


-def scaleABCD(ABCD, nlev=2, f=0, xlim=1, ymax=None, umax=None, N_sim=1e5, N0=10):
+def scaleABCD(ABCD, nlev=2, f=0, xlim=1, ymax=None, umax=None, N_sim=100_000, N0=10):
     """Scale the loop filter of a general delta-sigma modulator for dynamic range.

     The ABCD matrix is scaled so that the state maxima are less than the
@@ -96,10 +96,10 @@ def scaleABCD(ABCD, nlev=2, f=0, xlim=1, ymax=None, umax=None, N_sim=1e5, N0=10)
     # First get a rough estimate of umax.
     ulist = np.arange(0.1, 1.1, 0.1)*(nlev - 1)
     umax = nlev - 1
-    N = 1000.0
+    N = 1000
     u0 = np.hstack((np.exp(2j*np.pi*f*np.arange(-N0, 0))*raised_cosine, \
                     np.exp(2j*np.pi*f*np.arange(0, N)))) \
-         + 0.01*np.dot(np.array([[1, 1j]]), npr.randn(2, N + N0))
+        + 0.01*np.dot(np.array([[1, 1j]]), npr.randn(2, N + N0))
     if not quadrature:
         u0 = np.real(u0)
     for u in ulist:
diff --git a/deltasigma/_simulateQDSM.py b/deltasigma/_simulateQDSM.py
index d69c3ef..ff29f39 100644
--- a/deltasigma/_simulateQDSM.py
+++ b/deltasigma/_simulateQDSM.py
@@ -28,7 +28,6 @@
 from scipy.signal import freqz, tf2zpk

 from ._config import _debug, setup_args
-from ._ds_quantize import ds_quantize
 from ._evalTF import evalTF
 from ._partitionABCD import partitionABCD
 from ._utils import carray, diagonal_indices, _is_zpk, _is_A_B_C_D, _is_num_den
@@ -38,13 +37,14 @@
 try:
     import pyximport
     pyximport.install(setup_args=setup_args, inplace=True)
-    from ._simulateQDSM_core import simulateQDSM_core
 except ImportError as e:
     if _debug:
         print(str(e))
     # we'll just fall back to the Python version
     pass

+from ._simulateQDSM_core import simulateQDSM_core
+

 def simulateQDSM(u, arg2, nlev=2, x0=None):
     """Simulate a quadrature delta-sigma modulator.
diff --git a/deltasigma/_simulateQDSM_core.py b/deltasigma/_simulateQDSM_core.py
index 1b87935..c68b66c 100644
--- a/deltasigma/_simulateQDSM_core.py
+++ b/deltasigma/_simulateQDSM_core.py
@@ -19,9 +19,9 @@
 from __future__ import division, print_function

 import numpy as np
-
 from ._ds_quantize import ds_quantize

+
 def simulateQDSM_core(u, A, B, C, D1, order, nlev, nq, x0):
     N = u.shape[1]
     v = np.zeros(shape=(nq, N), dtype='complex128')
@@ -40,6 +40,7 @@ def simulateQDSM_core(u, A, B, C, D1, order, nlev, nq, x0):
     xmax = np.max((np.abs(x0), xmax), axis=0)
     return v, xn, xmax, y

+
 def ds_qquantize(y, n):
     """Quadrature quantization
     """
diff --git a/deltasigma/_simulateSNR.py b/deltasigma/_simulateSNR.py
index 778fd14..520b522 100644
--- a/deltasigma/_simulateSNR.py
+++ b/deltasigma/_simulateSNR.py
@@ -224,10 +224,10 @@ def simulateSNR(arg1, osr, amp=None, f0=0, nlev=2, f=None, k=13,
                                  np.arange(Ntransient/2)))
     if not quadrature:
         tone = M*np.sin(2*np.pi*F/N*np.arange(N + Ntransient))
-        tone[:Ntransient/2] = tone[:Ntransient/2] * soft_start
+        tone[:Ntransient//2] = tone[:Ntransient//2] * soft_start
     else:
         tone = M*np.exp(2j*np.pi*F/N * np.arange(N + Ntransient))
-        tone[:Ntransient/2] = tone[:Ntransient/2] * soft_start
+        tone[:Ntransient//2] = tone[:Ntransient//2] * soft_start
     if not quadrature_ntf:
         tone = tone.reshape((1, -1))
         tone = np.vstack((np.real(tone), np.imag(tone)))
@@ -235,14 +235,14 @@ def simulateSNR(arg1, osr, amp=None, f0=0, nlev=2, f=None, k=13,
     window = 0.5*(1 - np.cos(2*np.pi*np.arange(N)/N))
     if f0 == 0:
         # Exclude DC and its adjacent bin
-        inBandBins = int(N/2) + np.arange(3,
+        inBandBins = int(N//2) + np.arange(3,
                                           np.round(N/osr_mult/osr) + 1,
                                           dtype=np.int32)
         F = F - 2
     else:
         f1 = np.round(N*(f0 - 1./osr_mult/osr))
         # Should exclude DC
-        inBandBins = int(N/2) + np.arange(f1,
+        inBandBins = int(N//2) + np.arange(f1,
                                           np.round(N*(f0 + 1./osr_mult/osr)) + 1,
                                           dtype=np.int32)
         F = F - f1 + 1
diff --git a/deltasigma/_synthesizeNTF0.py b/deltasigma/_synthesizeNTF0.py
index 5a8915f..3b5f6e6 100644
--- a/deltasigma/_synthesizeNTF0.py
+++ b/deltasigma/_synthesizeNTF0.py
@@ -30,6 +30,7 @@
 optimizing the result.
 """

+from __future__ import division
 from warnings import warn

 import numpy as np
@@ -111,7 +112,7 @@ def synthesizeNTF0(order, osr, opt, H_inf, f0):
     # Determine the zeros.
     if f0 != 0:
         # Bandpass design-- halve the order temporarily.
-        order = order/2
+        order = order//2
         dw = np.pi/(2*osr)
     else:
         dw = np.pi/osr
@@ -192,7 +193,7 @@ def synthesizeNTF0(order, osr, opt, H_inf, f0):
             mb2 = c2pif0 + e2*np.exp(1j*w)
             p = mb2 - np.sqrt(mb2**2-1)
             # Reflect poles to be inside the unit circle
-            out = abs(p)>1
+            out = abs(p) > 1
             p[out] = 1/p[out]
             # The following is not exactly what delsig does.
             p = cplxpair(p)
diff --git a/deltasigma/_synthesizeNTF1.py b/deltasigma/_synthesizeNTF1.py
index e1bae57..340198c 100644
--- a/deltasigma/_synthesizeNTF1.py
+++ b/deltasigma/_synthesizeNTF1.py
@@ -57,7 +57,7 @@ def synthesizeNTF1(order, osr, opt, H_inf, f0):
     # Determine the zeros.
     if f0 != 0:
         # Bandpass design-- halve the order temporarily.
-        order = order/2
+        order = order//2
         dw = np.pi/(2*osr)
     else:
         dw = np.pi/osr
@@ -74,7 +74,7 @@ def synthesizeNTF1(order, osr, opt, H_inf, f0):
             # Bandpass design-- shift and replicate the zeros.
             order = order*2
             z = np.sort(z) + 2*np.pi*f0
-            z = np.vstack((z,-z)).transpose().flatten()
+            z = np.vstack((z, -z)).transpose().flatten()
             z = np.exp(1j*z)
     else:
         z = opt
@@ -143,7 +143,7 @@ def synthesizeNTF1(order, osr, opt, H_inf, f0):
             warn('Danger! Iteration limit exceeded.')
     else:
         # Bandpass design
-        x = 0.3**(order/2-1) # starting guess (not very good for f0~0)
+        x = 0.3**(order//2-1) # starting guess (not very good for f0~0)
         if f0 > 0.25:
             z_inf = 1.
         else:
@@ -196,16 +196,17 @@ def synthesizeNTF1(order, osr, opt, H_inf, f0):
             # options = optimset(options,'Display','off');
             # %options = optimset(options,'Display','iter');
             opt_result = fmin_l_bfgs_b(ds_synNTFobj1, x0, args=(p, osr, f0),
-                                       approx_grad=True, bounds=list(zip(lb,ub)))
-            x=opt_result[0]
+                                       approx_grad=True, bounds=list(zip(lb, ub)))
+            x = opt_result[0]
             x0 = x
             z = np.exp(2j*np.pi*(f0+0.5/osr*x))
             if f0 > 0:
-                z = padl(z, len(p)/2, np.exp(2j*np.pi*f0))
-            z = np.concatenate((z, z.conj()), axis=1)
+                z = padl(z, len(p)//2, np.exp(2j*np.pi*f0))
+            # z = np.concatenate((z, z.conj()), axis=1)
+            z = np.ravel(np.column_stack( (z, z.conj()) ))
             if f0 == 0:
                 z = padl(z, len(p), 1)
-            if np.abs(np.real(evalTF((z, p, k), z_inf)) - H_inf ) < ftol:
+            if np.abs(np.real(evalTF((z, p, k), z_inf)) - H_inf) < ftol:
                 opt_iteration = 0
             else:
                 opt_iteration = opt_iteration - 1
diff --git a/deltasigma/_utils.py b/deltasigma/_utils.py
index cd7f665..0585f6f 100644
--- a/deltasigma/_utils.py
+++ b/deltasigma/_utils.py
@@ -444,7 +444,7 @@ def _get_zpk(arg, input=0):
     elif _is_zpk(arg):
         z, p, k = np.atleast_1d(arg[0]), np.atleast_1d(arg[1]), arg[2]
     elif _is_num_den(arg):
-        sys = lti(*arg)
+        sys = lti(*arg).to_zpk()
         z, p, k = sys.zeros, sys.poles, sys.gain
     elif _is_A_B_C_D(arg):
         z, p, k = ss2zpk(*arg, input=input)
@@ -520,7 +520,8 @@ def _get_num_den(arg, input=0):
         A, B, C, D = partitionABCD(arg)
         num, den = ss2tf(A, B, C, D, input=input)
     elif isinstance(arg, lti):
-        num, den = arg.num, arg.den
+        arx = arg.to_tf()
+        num, den = arx.num, arx.den
     elif _is_num_den(arg):
         num, den = carray(arg[0]).squeeze(), carray(arg[1]).squeeze()
     elif _is_zpk(arg):
@@ -604,16 +605,17 @@ def _getABCD(arg):
         # ABCD matrix
         A, B, C, D = partitionABCD(arg)
     elif isinstance(arg, lti):
-        A, B, C, D = arg.A, arg.B, arg.C, np.atleast_2d(arg.D)
+        arx = arg.to_ss()
+        A, B, C, D = arx.A, arx.B, arx.C, np.atleast_2d(arx.D)
     elif _is_zpk(arg) or _is_num_den(arg) or _is_A_B_C_D(arg):
-        sys = lti(*arg)
+        sys = lti(*arg).to_ss()
         A, B, C, D = sys.A, sys.B, sys.C, sys.D
     elif isinstance(arg, collections.Iterable):
         A, B, C, D = None, None, None, None
         for i in arg:
             # Note we do not check if the user has assembled a list with
             # mismatched lti representations.
-            sys = lti(*i) if not hasattr(i, 'A') else i
+            sys = lti(*i).to_ss() if not hasattr(i, 'A') else i
             if A is None:
                 A = sys.A
             elif not np.allclose(sys.A, A, atol=1e-8, rtol=1e-5):
diff --git a/deltasigma/tests/test_bilogplot.py b/deltasigma/tests/test_bilogplot.py
index a360068..67d1d75 100644
--- a/deltasigma/tests/test_bilogplot.py
+++ b/deltasigma/tests/test_bilogplot.py
@@ -40,8 +40,8 @@ def test_bilogplot(self):
         ftest = int(np.round(f0*N + 1./3 * fB))
         u = 0.5*np.sin(2*np.pi*ftest/N*np.arange(N))
         v, xn, xmax, y = ds.simulateDSM(u, H)
-        spec = np.fft.fft(v*ds.ds_hann(N))/(N/4)
-        X = spec[:N/2 + 1]
+        spec = np.fft.fft(v*ds.ds_hann(N))/(N//4)
+        X = spec[:N//2 + 1]
         plt.figure()
         # graphical function: we check it doesn't fail
-        ds.bilogplot(X, f0*N, ftest, (.03, .3, .3), (-140, 0, 10, 20))
+        ds.bilogplot(X, int(f0*N), ftest, (.03, .3, .3), (-140, 0, 10, 20))
diff --git a/deltasigma/tests/test_bplogsmooth.py b/deltasigma/tests/test_bplogsmooth.py
index d922394..15f4f75 100644
--- a/deltasigma/tests/test_bplogsmooth.py
+++ b/deltasigma/tests/test_bplogsmooth.py
@@ -24,7 +24,7 @@ def setUp(self):
         u = 0.5*np.sin(2*np.pi*ftest/N*np.arange(N))
         v, xn, xmax, y = ds.simulateDSM(u, H)
         spec = np.fft.fft(v*ds.ds_hann(N))/(N/4)
-        X = spec[:N/2 + 1]
+        X = spec[:N//2 + 1]
         self.f, self.p = ds.bplogsmooth(X, ftest, f0)

     def test_one(self):
diff --git a/deltasigma/tests/test_calculateQTF.py b/deltasigma/tests/test_calculateQTF.py
index 9e154bc..df51f2c 100644
--- a/deltasigma/tests/test_calculateQTF.py
+++ b/deltasigma/tests/test_calculateQTF.py
@@ -148,11 +148,16 @@ def test(self):
                allsortedclose(istf[2], self.istf_k, atol=1e-3, rtol=1e-3)

 def allsortedclose(a, b, atol=1e-3, rtol=1e-3):
-    if np.iscomplex(a).any():
+    if np.isscalar(a):
+        pass
+    elif np.iscomplex(a).any():
         a = np.sort_complex(a)
     else:
         a = np.sort(a)
-    if np.iscomplex(b).any():
+
+    if np.isscalar(b):
+        pass
+    elif np.iscomplex(b).any():
         b = np.sort_complex(b)
     else:
         b = np.sort(b)
diff --git a/deltasigma/tests/test_calculateSNR.py b/deltasigma/tests/test_calculateSNR.py
index fccb899..db68c16 100644
--- a/deltasigma/tests/test_calculateSNR.py
+++ b/deltasigma/tests/test_calculateSNR.py
@@ -38,14 +38,14 @@ def test_snr_is_40(self):
         """ Test that a particular SNR is within roundings errors of 40 (dB?)
         """
         N = self.N
-        snr = ds.calculateSNR(self.hwfft[:N/2], int(N*self.f1))
+        snr = ds.calculateSNR(self.hwfft[:N//2], int(N*self.f1))
         # Consider replacing with assertAlmostEqual
         self.assertTrue(np.allclose(snr, 40, atol=1e-8, rtol=1e-8))

     def test_snr_is_inf(self):
         """ Test that a paricular SNR is infinite. """
         N = self.N
-        hwfft = np.zeros((N/2, ))
+        hwfft = np.zeros((N//2, ))
         hwfft[512] = 1.0 # specially crafted to have Inf snr
         snr = ds.calculateSNR(hwfft[:N/2], 512)
         self.assertEqual(snr, np.Inf)
diff --git a/deltasigma/tests/test_evalTF.py b/deltasigma/tests/test_evalTF.py
index cc3f6d7..ebeff18 100644
--- a/deltasigma/tests/test_evalTF.py
+++ b/deltasigma/tests/test_evalTF.py
@@ -38,7 +38,7 @@ def setUp(self):
         self.h2 = ds.evalTF(tstr2, z)
         self.h3 = ds.evalTF(H, z)
         self.h4 = ds.evalTF(lti(tstr2.zeros, tstr2.poles, tstr2.gain), z)
-        h5tf = lti(tstr2.zeros, tstr2.poles, tstr2.gain)
+        h5tf = lti(tstr2.zeros, tstr2.poles, tstr2.gain).to_ss()
         self.h5 = ds.evalTF((h5tf.A, h5tf.B, h5tf.C, h5tf.D), z)
         h6tf = np.vstack((np.hstack((h5tf.A, h5tf.B)),
                           np.hstack((h5tf.C, np.atleast_2d(h5tf.D)))))
diff --git a/deltasigma/tests/test_exampleMASH.py b/deltasigma/tests/test_exampleMASH.py
index 6bf2b24..5cf1731 100644
--- a/deltasigma/tests/test_exampleMASH.py
+++ b/deltasigma/tests/test_exampleMASH.py
@@ -47,8 +47,8 @@ def testMultipleQ2(self):
         Amp = ds.undbv(-3) # Test tone amplitude, relative to full-scale.
         f = 0.3 # will be adjusted to a bin
         N = 2**12
-        f1_bin = np.round(f1*N)
-        f2_bin = np.round(f2*N)
+        f1_bin = int(np.round(f1*N))
+        f2_bin = int(np.round(f2*N))
         fin = np.round(((1 - f)/2*f1 + (f + 1)/2*f2) * N)
         # input sine
         t = np.arange(0, N).reshape((1, -1))
diff --git a/deltasigma/tests/test_l1norm.py b/deltasigma/tests/test_l1norm.py
index 143700e..b3c7ab0 100644
--- a/deltasigma/tests/test_l1norm.py
+++ b/deltasigma/tests/test_l1norm.py
@@ -31,8 +31,10 @@ def setUp(self):
         k = 1.
         self.zpk_tuple = zeros, poles, k
         splti = lti(zeros, poles, k)
-        self.num_den_tuple = (splti.num, splti.den)
-        self.ABCD_tuple = (splti.A, splti.B, splti.C, splti.D)
+        spltf = splti.to_tf()
+        self.num_den_tuple = (spltf.num, spltf.den)
+        splss = splti.to_ss()
+        self.ABCD_tuple = (splss.A, splss.B, splss.C, splss.D)
         self.splti = splti

     def test_l1norm_1(self):
diff --git a/deltasigma/tests/test_partitionABCD.py b/deltasigma/tests/test_partitionABCD.py
index c1a6b76..1c60f39 100644
--- a/deltasigma/tests/test_partitionABCD.py
+++ b/deltasigma/tests/test_partitionABCD.py
@@ -28,7 +28,7 @@ class TestPartitionABCD(unittest.TestCase):

     def setUp(self):
         # data for test 1
-        self.ob = lti((1, ), (1, 2, 10))
+        self.ob = lti((1, ), (1, 2, 10)).to_ss()
         ab = np.hstack((self.ob.A, self.ob.B))
         cd = np.hstack((self.ob.C, self.ob.D.reshape((1,1))))
         self.ABCD1 = np.vstack((ab, cd))
diff --git a/deltasigma/tests/test_utils.py b/deltasigma/tests/test_utils.py
index 8c1c074..ca1a0d0 100644
--- a/deltasigma/tests/test_utils.py
+++ b/deltasigma/tests/test_utils.py
@@ -306,10 +306,12 @@ def test_mround():
 def test_getABCD():
     """Test function for _getABCD()"""
     H = lti(*((1.,),(-2, -2), 1))
+    Htf = H.to_tf()
+    Hss = H.to_ss()
     x1 = _getABCD(H)
-    x2 = _getABCD((H.num, H.den))
+    x2 = _getABCD((Htf.num, Htf.den))
     x3 = _getABCD((H.zeros, H.poles, H.gain))
-    x4 = _getABCD((H.A, H.B, H.C, H.D))
+    x4 = _getABCD((Hss.A, Hss.B, Hss.C, Hss.D))
     for y1, y2, y3, y4 in zip(x1, x2, x3, x4):
         assert np.allclose(y1, y2)
         assert np.allclose(y1, y3)