Archive
Visualising negative frequency
Negative frequency is one of those concepts that many people struggle with. Here’s an image that will hopefully help. There’s an SVG version available at https://pzdsp.com/negf/negative_frequency.svg.
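If you’d like to experiment before diving into the GUI below, here’s a minimal sketch (the frequency and number of samples are just illustrative values) that plots a positive-frequency and a negative-frequency complex exponential as counter-rotating spirals, which is essentially what the GUI animates:
% Minimal sketch: counter-rotating complex exponentials (illustrative values)
w = 0.2;                  % frequency in radians per sample
n = 0:59;                 % sample index
x_pos = exp( 1j*w*n);     % anti-clockwise rotation - positive frequency
x_neg = exp(-1j*w*n);     % clockwise rotation - negative frequency
figure
plot3(real(x_pos), imag(x_pos), n, 'b'); hold on
plot3(real(x_neg), imag(x_neg), n, 'r'); grid on
xlabel('X (real)'); ylabel('Y (imaginary)'); zlabel('Samples')
legend('e^{j\omega n} (positive frequency)', 'e^{-j\omega n} (negative frequency)')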

Negative Frequency GUI Code
function negfgui(varargin)
% An interactive GUI which attempts to help students visualise negative frequencies
% by David Dorran, TU Dublin, March 2022
% reuses some code to track mouse movement from the zpgui function by Tom Krauss of Purdue University
% Youtube video explanation provided at https://youtu.be/Rxc_ypVdruw
global zero_locations diff_equ_text op_text_samps op_freq_pts op_points draw_plt disp_plt op_disp_plt freq_plt xyz_pts op_xyz_pts xy_pts ptr negfqui_fh freq_pts points text_samps b a y gain pole_vals zero_vals freq_resp_pts
buf_len = 50;
tail_len = 10;
max_points = 100000;
fft_len = 1024;
win_func = hamming(buf_len);
if nargin == 0
action = 'init';
else
action = varargin{1};
end
if(strcmp(class(action), 'matlab.ui.control.UIControl'))
zero_vals = str2num(zero_locations.String);
negfgui('update_freq_response');
end
switch action
case 'init'
zero_vals = [ -j j];
pole_vals = [0 0];
if(strcmp(class(negfqui_fh),'matlab.ui.Figure'))
if(~isvalid(negfqui_fh))
negfqui_fh = figure;
end
if(nargin ==0)
close(negfqui_fh);
negfqui_fh = figure;
end
else
negfqui_fh = figure;
end
set(negfqui_fh,'position',[ 1 41 1366 651]);
ptr = 1;
points = ones(1,max_points)*NaN;
op_points = ones(1,max_points)*NaN;
freq_plt = subplot(2,3,1);
fax = (([0:fft_len-1])/fft_len*2*pi)-pi;
yyaxis left
freq_pts = plot(fax,points(1:fft_len));
hold on
op_freq_pts = plot(fax,points(1:fft_len),'r');
xlim([-pi pi])
ylim([0 buf_len/2])
ylabel('Magnitude Content')
yyaxis right
freq_resp_pts = plot(fax,points(1:fft_len));
xlim([-pi pi])
ylim([0 1])
title('Double-sided spectrum');
xlabel('frequency (radians per sample)')
ylabel('Magnitude Response')
draw_plt = subplot(2,3,2);
xy_pts = plot([]);
for m =1:tail_len
hold on
xy_pts(m) = plot(points(m)+points(m)*j,'r.','MarkerSize',tail_len-m+1);
end
ylabel('Y')
xlabel('X')
xlim([-1 1]);
ylim([-1 1]);
disp_plt = subplot(2,3,3);
xyz_pts = plot3(real(points),imag(points),[0:length(points)-1]);
hold on
surf([0 0 ],[-1.1 1.1],[-1 length(points); -1 length(points)],'FaceAlpha',0.2)
surf([-1.1 1.1], [ 0 0 ],[ length(points) length(points); -1 -1],'FaceAlpha',0.2)
xlabel('X')
ylabel('Y')
zlabel('Samples')
xlim([-1 1]);
ylim([-1 1]);
zlim([0 buf_len])
set(gca,'CameraUpVector', [0 1 0 ])
grid on
set(draw_plt,'buttondownfcn', 'negfgui(''draw'')')
op_disp_plt = subplot(2,3,6);
op_xyz_pts = plot3(real(op_points),imag(op_points),[0:length(op_points)-1]);
hold on
surf([0 0 ],[-1.1 1.1],[-1 length(op_points); -1 length(op_points)],'FaceAlpha',0.2)
surf([-1.1 1.1], [ 0 0 ],[ length(op_points) length(op_points); -1 -1],'FaceAlpha',0.2)
xlabel('X')
ylabel('Y')
zlabel('Samples')
xlim([-1 1]);
ylim([-1 1]);
zlim([0 buf_len])
set(gca,'CameraUpVector', [0 1 0 ])
grid on
text_samps = annotation('textbox','position',[.0500 0.48 0.85 0.05],'LineStyle','none','FontSize',10);
op_text_samps = annotation('textbox','position',[.0500 0 0.85 0.05],'LineStyle','none','FontSize',10);
diff_equ_text = annotation('textbox',...
[0.34 0.337941628264209 0.285237188872621 0.0844854070660522],...
'String',[''],...
'FontSize',14,...
'FitBoxToText','off',...
'EdgeColor','none');
annotation('textbox',...
[0.34 0.2 0.285237188872621 0.0844854070660522],...
'String',['Zero Locations:'],...
'FontSize',14,...
'FitBoxToText','off',...
'EdgeColor','none');
zero_locations = uicontrol('style','edit','units','normalized','FontSize',14,'position', [0.34 0.17 0.2 0.05]);
update_button = uicontrol('style','pushbutton', 'string','update locations','units','normalized','position',[0.34 0.12 0.2 0.05],'callback',{@negfgui});
negfgui('update_freq_response')
case 'update_freq_response'
b = poly(zero_vals);
a = poly(pole_vals);
H = freqz(b,a,fft_len,'whole');
gain = max(abs(H));
freq_resp_pts.YData= fftshift(abs(H)/gain); %ensure max gain is 1
b = b/gain;
subplot(2,3,4)
zplane(b,a);
title('Pole-Zero Plot')
try
str = create_difference_equation(b,1,'tex');
diff_equ_text.String = {['Difference Equation:'], str};
catch
diff_equ_text.String = {['b coefficients: ' num2str(b)], ['a coefficients:' num2str(a)]};
end
zero_locations.String = num2str(zero_vals);
case 'draw'
set(gcf,'userdata','')
set(gcf,'windowbuttonmotionfcn','set(gcf,''userdata'',''motion'')')
set(gcf,'windowbuttonupfcn','set(gcf,''userdata'',''up'')')
done = 0;
while ~done
waitfor(gcf,'userdata')
switch get(gcf,'userdata')
case 'motion'
pt = get(draw_plt,'currentpoint');
if(abs(pt(1,1))>1)
pt(1,1) = 1*sign(pt(1,1));
end
if(abs(pt(1,2))>1)
pt(1,2) = 1*sign(pt(1,2));
end
points(ptr) = pt(1,1)+pt(1,2)*j;
xyz_pts.XData(ptr) = real(points(ptr));
xyz_pts.YData(ptr) = imag(points(ptr));
%filter the signal (it's not perfect!)
try
op_points(ptr) = 0;
for k= 1:length(b)
op_points(ptr) = op_points(ptr)+ b(k)*points(ptr-k+1);
end
catch
end
op_xyz_pts.XData(ptr) = real(op_points(ptr));
op_xyz_pts.YData(ptr) = imag(op_points(ptr));
if(ptr>buf_len)
disp_plt.ZLim = [ptr-buf_len ptr];
op_disp_plt.ZLim = [ptr-buf_len ptr];
freq_pts.YData = abs(fftshift(fft(points(ptr-buf_len+1:ptr).*win_func', fft_len)));
op_freq_pts.YData = abs(fftshift(fft(op_points(ptr-buf_len+1:ptr).*win_func', fft_len)));
end
for m =1:tail_len
try
xy_pts(m).XData(1) = real(points(ptr-m+1));
xy_pts(m).YData(1) = imag(points(ptr-m+1));
text_samps.String = num2str(fliplr(round(points(ptr-m+1:ptr)*100)/100));
op_text_samps.String = num2str(fliplr(round(op_points(ptr-m+1:ptr)*100)/100));
catch
text_samps.String = num2str(fliplr(round(points(1:ptr)*100)/100));
end
end
ptr=ptr+1;
if(ptr> max_points)
negfgui('init')
end
case 'up'
done = 1;
end
set(gcf,'userdata','')
end
set(gcf,'windowbuttonmotionfcn','')
set(gcf,'windowbuttonupfcn','')
end
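To try the GUI, just call the function with no input arguments; draw in the X-Y plot with the mouse and edit the zero locations (for example -i i) to change the filter applied to the drawn signal.
% launch the interactive GUI
negfgui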
Digital Signal Processing Notes
I’ve made some notes relating to Digital Signal Processing and the Discrete Fourier Transform available at http://pzdsp.com/docs.
I intend to add more documents to this over time.
Code to extract music notation from Flowkey app
% MATLAB code to extract music notation from a Flowkey screen-grab video. See https://youtu.be/4oOMWkMRCCg
v = VideoReader('takemetochurch.mp4');
ind = 5*25;
frame =v.read(ind);
im = frame(430:660 ,: ,:);
new_im = rgb2gray(im);
prev_x = sum(new_im); % used to find the best overlap between frames
while 1
ind = ind+5*25;
if(ind > v.NumFrames)
break
end
frame =v.read(ind);
im = frame(430:660 ,: ,:);
img = rgb2gray(im); % convert to greyscale
x = sum(img);
xc =[]; %stores cross correlation result
seg2 = prev_x(end-600+1:end); %end of previous frame
for w = 1: 300
seg1 = x(1+w:600+w);
xc(w) = sum(seg1.*seg2)./(sqrt(sum(seg1.^2)*sum(seg2.^2))); %cross correlation
end
[val loc] = max(xc); % loc is where the max correlation occurs
[ r c d] = size(im);
new_seg =img(:,201:end);
[ r1 c1 d1] = size(new_seg);
new_im(:,end-600+1-loc+200:end-600-loc+200+c1) = new_seg;
prev_x = x;
end
img = new_im;
% find where the bars occur in the image
for k = 1:size(img,2) % step across each column of the stitched image
x(k) = sum(img(95:145,k)); % the bar lines are clearly visible across rows 95 to 145
end
bars = find(x<5000); %obtained value of 5000 by visually inspecting plot(x)
op_image_max_width = 2000;
op_image_max_height = 1200;
overlap = 10;
start_pt = overlap+1;
prev_end_pt = bars(1);
op_img = img(1,1);
k = 1;
img_num = 1;
while(k<length(bars))
end_pt = bars(k);
if(end_pt-start_pt>op_image_max_width)
[rows cols ] = size(img(:,start_pt-overlap:prev_end_pt+overlap));
[rows1 cols1] = size(op_img);
if(rows+rows1 > op_image_max_height)
imwrite(op_img, ['notation_' num2str(img_num) '.jpg']);
op_img = img(1,1);
img_num = img_num + 1;
end
op_img(end+1:end+rows,1:cols,:) = img(:,start_pt-10:prev_end_pt+10);
start_pt = prev_end_pt;
end
prev_end_pt = end_pt;
k = k + 1;
end
imwrite(op_img, ['notation_' num2str(img_num) '.jpg']);
Arduino sinusoidal waveform generator and signal capture
I have put together an example of how to use the Arduino Nano as a signal generator and digital capture device. It runs at a pretty low sampling rate (2 kHz), but I thought it might be of some use. Check out a video explanation at https://youtu.be/_31sQn1Cg9g. The files used are available at http://pzdsp.com/shared/arduino_sin_gen.zip
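If you want to pull the captured samples into MATLAB, something along the lines of the sketch below should work; the port name, baud rate and data format are assumptions that need to match the Arduino sketch in the zip file.
% Hypothetical capture example - adjust port, baud rate and format to suit your setup
s = serialport("COM3", 115200);    % use serialportlist to find the correct port
fs = 2000;                         % sampling rate used by the Arduino
N = 2*fs;                          % capture two seconds of data
samples = read(s, N, "uint16");    % assumes the sketch streams 16-bit samples
clear s                            % release the serial port
t = (0:N-1)/fs;
plot(t, samples)
xlabel('Time (seconds)'); ylabel('ADC reading')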
Virtual Oscilloscope
I’ve created a virtual oscilloscope that hopefully will help students learn how to work with real ones! It doesn’t have too many features so students can focus on the fundamentals that appear on all scopes: scaling, positioning and triggering.
Check it out at pzdsp.com/elab
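For anyone curious about what the trigger control is actually doing, here’s a rough MATLAB sketch (unrelated to the web app’s own code) that aligns a trace to its first rising-edge crossing of a trigger level, which is how a scope produces a stable display:
% Rough sketch of a rising-edge trigger (illustrative values only)
fs = 10000; t = 0:1/fs:0.02;
x = sin(2*pi*250*t) + 0.05*randn(size(t));                        % noisy 250 Hz test signal
trig_level = 0;
idx = find(x(1:end-1) < trig_level & x(2:end) >= trig_level, 1);  % first rising edge
plot(t(idx:end) - t(idx), x(idx:end))
xlabel('Time (seconds)'); ylabel('Amplitude'); title('Trace aligned to trigger point')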
DSP Foundations Notes
I’ve put together a set of notes introducing Digital Signal Processing. There are only 25 pages, but I hope that’s enough to get someone started.
interpLTspice
% This function interpolates data returned from the runLTspice function so
% as to make the time vector evenly spaced.
%
% example usage:
%   asc_file = 'filename.asc';
%   volt_curr_list = runLTspice(asc_file);
%   results = runLTspice(asc_file, volt_curr_list);
%   fs = 10000;
%   interp_results = interpLTspice(results, fs);
%
% By David Dorran (david.dorran@dit.ie)
function data_struct = interpLTspice(data_struct, fs)
% check if the data is structured as expected
data_ok = 0;
if(isstruct(data_struct))
    data_ok = 1;
    if ~(isfield(data_struct,'data') && isfield(data_struct,'data_name') && isfield(data_struct,'time_vec'))
        data_ok = 0;
    end
end
if(~data_ok)
    error('The variable passed to this function is not a data structure returned from runLTspice and cannot be processed')
end
if ~(isnumeric(fs) && length(fs) == 1 && fs > 0)
    error('The second parameter used in this function should be a positive number representing the sampling frequency')
end
try
    t = data_struct.time_vec(1):1/fs:data_struct.time_vec(end);
catch
    error(sprintf('A sampling rate of %f resulted in too much data (memory limits exceeded).\n\nTry a different sampling frequency using interpLTspice(data_struct,fs);', fs));
end
% resample each trace onto the evenly spaced time vector
for k = 1:length(data_struct.data)
    data_struct.data{k} = interp1(data_struct.time_vec, data_struct.data{k}, t);
end
data_struct.time_vec = t;
runLTspice demo
%% Demonstration of the runLTspice function
%
% Demo video at http://youtu.be/ax9H4eKuZv4
%
% Download runLTspice from:
%   https://dadorran.wordpress.com/2014/10/29/runltspice-m/
%
% Download this code from https://dadorran.wordpress.com
%
% LTspice schematic available from:
%   https://www.dropbox.com/s/y7j1vmyscvppsn4/RC.asc?dl=0

%% read the voltage and currents from the circuit
result = runLTspice('C:\Temp\RC.asc', 'V(n002)');
plot(result.time_vec, result.data{1})
xlabel('Time (seconds)')
ylabel('Voltage')
title(result.data_name{1})

%% apply a step input and plot the capacitor current and voltage waveforms
fs = 200;
step = [zeros(1, fs/2) ones(1, fs/2)];
res = runLTspice('C:\Temp\RC.asc', 'V(n002), I(C1)', 'V1', step, fs);
plot(res.time_vec, res.data{1})
hold on
plot(res.time_vec, res.data{2}*1000, 'r')
xlabel('Time (seconds)')
ylabel('Amplitude')
hold off
legend([res.data_name{1} ' (v)'], [res.data_name{2} ' (mA)'])

%% apply a noisy square wave and compare the supply voltage with the capacitor voltage
sq_wav = [step step step step];
high_f_noise = filter(1, [1 0.9], randn(1, length(sq_wav))*0.1);
ns_sq_wav = sq_wav + high_f_noise;
r = runLTspice('C:\Temp\RC.asc', 'V(n001), V(n002)', 'V1', ns_sq_wav, fs);
plot(r.time_vec, r.data{1})
hold on
plot(r.time_vec, r.data{2}, 'r')
legend('supply voltage', 'smoothed voltage')
hold off
Audio time-scale modification – VSOLA algorithm in MATLAB
function op = vsola(ip, tsm_factor, P)
% Implementation of the variable parameter synchronised overlap-add (VSOLA) algorithm.
% This implementation makes use of the standard SOLA algorithm (Roucos and Wilgus, ICASSP 1986)
% and some efficient parameter settings for the SOLA algorithm
% (Dorran, Lawlor and Coyle, ICASSP 2003) and (Dorran, Lawlor and Coyle, DAFX 2003).
% Given an input, ip, the longest likely pitch period, P, and a time-scale modification
% factor, tsm_factor, return an output, op, that is a time-scaled version of the input.
% P is the length, in samples, of the lowest likely fundamental period of the input;
% for speech it is set to (16/1000)*fs samples and for music it is typically set to
% (20/1000)*fs samples.
%
% David Dorran, Audio Research Group, Dublin Institute of Technology
% david.dorran@dit.ie
% http://eleceng.dit.ie/dorran
% http://eleceng.dit.ie/arg

% make sure input is mono and transpose if necessary
[r, c] = size(ip);
if r > 1
    ip = ip';
end
[r, c] = size(ip);
if r > 1
    disp('Note: only works on mono signals');
    op = [];
    return
end

% initialize the values of analysis_frame_length, analysis_window_offset,
% synthesis_window_offset and length_of_overlap
desired_tsm_len = round(length(ip)*tsm_factor);
P = round(P); % longest likely pitch period in samples
Lmax = round(P * 1.5); % found this to be a reasonable value for Lmax - the duration over which the correlation function is applied
stationary_length = (P * 1.5); % found this to be a reasonable value for the stationary length - the max duration that could be discarded/repeated
analysis_window_offset = round((stationary_length - P)/abs(tsm_factor - 1)); % this equation was derived.
synthesis_window_offset = round(analysis_window_offset * tsm_factor);
analysis_frame_length = round(Lmax + synthesis_window_offset);
number_of_analysis_frames = floor((length(ip) - analysis_frame_length)/analysis_window_offset);
if number_of_analysis_frames < 2 % not much time-scaling being done - just return the input
    op = ip;
    return;
end

% the next two lines just ensure that the last frame finishes at the very end of the signal (not essential)
zpad = zeros(1, (number_of_analysis_frames*analysis_window_offset) + analysis_frame_length - length(ip));
ip = [ip zpad];

% initialize the output
op = zeros(1, desired_tsm_len);
% initialize the first output frame
op(1 : analysis_frame_length) = ip(1 : analysis_frame_length);

min_overlap = round(Lmax - P); % ensure that there is some minimum overlap
count = 0;

% Loop from the 2nd analysis frame to the number_of_analysis_frames
for m = 1 : number_of_analysis_frames
    % grab the mth input frame
    ip_frame = ip(analysis_window_offset * m : (analysis_window_offset * m) + analysis_frame_length - 1);
    % grab the maximum overlapping segments from the input frame and the current output
    seg_1 = op(round(synthesis_window_offset*(m-1))+analysis_frame_length - Lmax : round(synthesis_window_offset*(m-1))+analysis_frame_length - 1);
    seg_2 = ip_frame(1 : Lmax);
    % compute the correlation of these segments
    correlation = xcorr(seg_2, seg_1, 'coeff');
    % Find the best point to overlap (opt_overlap_length) making sure not to
    % exceed the maximum or go below the minimum overlap.
    correlation(length(correlation) - Lmax - 1 : length(correlation)) = -100;
    correlation(1 : min_overlap) = -100;
    [max_correlation, opt_overlap_length] = max(correlation);
    if(max_correlation == 0)
        opt_overlap_length = Lmax;
    end
    % offset = Lmax - opt_overlap_length;
    % if ((offset + analysis_window_offset - synthesis_window_offset) >= 0 & (offset + analysis_window_offset - synthesis_window_offset) <= P)
    %     count = count + 1;
    % end
    % append the mth analysis frame to the current synthesised output using a linear cross-fade
    ov_seg = linear_cross_fade(seg_1, ip_frame, opt_overlap_length);
    ov_len = (round(synthesis_window_offset*m)+analysis_frame_length) - (round(synthesis_window_offset*(m-1))+analysis_frame_length - Lmax) + 1;
    ov_seg = ov_seg(1:ov_len);
    op(round(synthesis_window_offset*(m-1))+analysis_frame_length - Lmax : round(synthesis_window_offset*m)+analysis_frame_length) = ov_seg;
end % end of for loop

% linear cross fade the first segment with the second segment given a certain amount of overlap
% |----------seg1----------|
%              |---------------seg2-----------------------|
%              |--overlap-|
function op = linear_cross_fade(seg_1, seg_2, overlap_length, cross_fade_duration)
narginchk(3, 4);
if nargin < 4
    cross_fade_duration = overlap_length;
end
if cross_fade_duration > overlap_length
    cross_fade_duration = overlap_length;
end
% overlap the end of seg_1 with the start of seg_2 using a linear cross-fade
if (length(seg_1) < overlap_length)
    seg_2 = seg_2(overlap_length - length(seg_1) + 1 : length(seg_2));
    overlap_length = length(seg_1);
end
if (length(seg_2) < overlap_length)
    seg_1 = seg_1(length(seg_1) - (overlap_length - overlap_length) : length(seg_1));
    overlap_length = length(seg_2);
end
overlapping_region = zeros(1, cross_fade_duration); % initialize the overlapping region
seg_1 = seg_1(1 : length(seg_1) - (overlap_length - cross_fade_duration));
op = zeros(1, length(seg_1) + length(seg_2) - cross_fade_duration);
if(overlap_length ~= 1)
    linear_ramp1 = (cross_fade_duration-1:-1:0) ./ (cross_fade_duration-1);
    linear_ramp2 = (0:1:cross_fade_duration-1) ./ (cross_fade_duration-1);
    overlapping_region = (seg_1(length(seg_1)-cross_fade_duration+1:length(seg_1)).*linear_ramp1) + (seg_2(1:cross_fade_duration).*linear_ramp2);
end
op = [seg_1(1:length(seg_1)-cross_fade_duration), overlapping_region, seg_2(cross_fade_duration+1:length(seg_2))];
% END of linear_cross_fade function
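A quick usage example (the audio file name is just a placeholder; the input should be mono). P is the longest likely pitch period in samples, roughly 16 ms for speech as noted in the header comments:
% Example usage (replace 'speech.wav' with your own mono recording)
[x, fs] = audioread('speech.wav');
tsm_factor = 1.5;        % stretch the duration by 50% (slower playback)
P = round(0.016*fs);     % ~16 ms pitch period, suitable for speech
y = vsola(x, tsm_factor, P);
soundsc(y, fs)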