import numpy as np
from IPython.display import Audio
import scipy.io.wavfile
import matplotlib.pyplot as plt
from numpy.fft import fft, ifft, fft2, ifft2, fftshift, ifftshift
#from scipy import ndimage, misc
from scipy.signal import gaussian
from joSigProc import *
# Widening filter on voice.
# Hear how the voice starts muffled, but gets more clear?
Omega, f = scipy.io.wavfile.read('Stamp.wav')
clip = f[15 * Omega:32 * Omega, 0]  # seconds 15-32, left channel
Audio(clip, rate=Omega)
#scipy.io.wavfile.write('Stamp_15-32.wav', Omega, f[15*Omega:32*Omega,0])
# Let's remove the HIGH frequencies (a low-pass filter).
Omega, f = scipy.io.wavfile.read('Stamp.wav')
start, stop = 32 * Omega, 42 * Omega  # seconds 32-42
f1 = f[start:stop, 0]
N = len(f1)
cutoff = 0.05 * N  # keep roughly the lowest 5% of the frequency samples
g1 = FilterSignal(f1, cutoff, band='low')
# Original spectrum in grey, filtered spectrum drawn over it.
PlotFT(f1, Omega, fig=1, color='0.75')
PlotFT(g1, Omega, fig=1, clf=False)
Audio(g1, rate=Omega)
# Let's remove the LOW frequencies (a high-pass filter on the same clip).
g2 = FilterSignal(f1, 0.05 * N, band='high')
# Original spectrum in grey, filtered spectrum drawn over it.
PlotFT(f1, Omega, fig=1, color='0.75')
PlotFT(g2, Omega, fig=1, clf=False)
Audio(g2, rate=Omega)
# Adding the two filtered sounds back together.
Audio(g1 + g2, rate=Omega)
# Another example.
Omega, r = scipy.io.wavfile.read('amazing_sound.wav')
left_channel = r[:, 0]
Audio(left_channel, rate=Omega)
# :-P
# Narrowing filter on banjo.
Omega, f = scipy.io.wavfile.read('TwoHeads.wav')
Audio(f[25 * Omega:37 * Omega, 0], rate=Omega)
# This sound has 2 main components: the voice, and the piano.
ff = f[150 * Omega:165 * Omega, 0].copy()
PlotSignal(ff, Omega)
Audio(ff, rate=Omega)
# Centred Fourier coefficients plus the matching (shifted) frequency axis.
FF = fftshift(fft(ff))
shifted_omega = ShiftedFreqSamples(ff, Omega)
PlotFT(ff, Omega)
# Filter it, leaving only the low frequencies (below 600 Hz).
T = 600
stop_band = np.abs(shifted_omega) > T  # everything above the cutoff
G = FF.copy()
G[stop_band] = 0.
plt.figure(1)
plt.clf()
PlotFT_raw(shifted_omega, abs(G), color='r');
# Back to the time domain; np.real discards the imaginary round-off.
g_low = np.real(ifft(ifftshift(G)))
g = g_low
Audio(np.real(g), rate=Omega)
# Now let's filter it, leaving only the frequencies above 1000 Hz.
T = 1000
stop_band = np.abs(shifted_omega) < T  # everything below the cutoff
G = FF.copy()
G[stop_band] = 0.
PlotFT_raw(shifted_omega, abs(G), fig=1, clf=False)
# Back to the time domain; np.real discards the imaginary round-off.
g_high = np.real(ifft(ifftshift(G)))
g = g_high
Audio(np.real(g), rate=Omega)
# The original sound, and its Fourier coefficients.
Omega, f = scipy.io.wavfile.read('handel.wav')
F = fftshift(fft(f))
shifted_omega = ShiftedFreqSamples(f, Omega)
t = TimeSamples(f, Omega)
print('Sampling rate is '+str(Omega)+' Hz')
print('Number of samples is '+str(np.shape(f)))
# Plot the modulus of the centred spectrum.
plt.plot(shifted_omega, np.abs(F))
plt.title('Frequency Domain')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Modulus');
Audio(f, rate=Omega)
# Output: Sampling rate is 8192 Hz / Number of samples is (73113,)
from copy import deepcopy

# Set up a frequency shift: move the band [tau_1, tau_2] Hz up by shift_Hz.
Fshift = deepcopy(F)
tau_1, tau_2 = 500, 1200
shift_Hz = 400
L = t[-1]                  # last time sample — presumably the clip duration in seconds
shift = int(shift_Hz * L)  # the shift expressed as a number of frequency bins (spacing ~1/L Hz)
print(shift)
# Output: 3569
# Locate the band [tau_1, tau_2) on the ascending frequency axis.
# np.searchsorted(a, v) returns the first index with a[idx] >= v, which is
# exactly what the original list(...).index(...) scans computed — but in
# O(log n) instead of O(n), and without raising ValueError when the
# threshold lies outside the sampled frequency range.
idx1 = np.searchsorted(shifted_omega, tau_1, side='left')
idx2 = np.searchsorted(shifted_omega, tau_2, side='left')
idx = np.arange(idx1, idx2)
# Copy the positive-frequency band up by `shift` bins, and mirror the same
# move on the negative frequencies (via negative indexing) so the spectrum
# keeps its conjugate symmetry and the inverse transform stays (nearly) real.
Fshift[idx + shift] = F[idx]
Fshift[-idx - shift] = F[-idx]
# !!! YIKES !!!
g = ifft(ifftshift(Fshift))
Audio(np.real(g), rate=Omega)
plt.plot(shifted_omega, abs(Fshift));