In [1]:
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sys
import time
sys.path.append('../robert/grpSLOPE/')
import generate_lambdas
sns.set()
np.random.seed(0)
import cProfile
from sklearn.metrics import confusion_matrix
import pandas as pd
plt.rcParams['text.usetex'] = True
In [2]:
%load_ext cython
In [3]:
%%cython -c=-Ofast -c=-march=native -c=-mtune=native -c=-mavx

import numpy as np
import scipy
cimport numpy as np
cimport cython

@cython.cdivision(True) #divide w/ c not python
@cython.boundscheck(False)  # Deactivate bounds checking
@cython.wraparound(False)   # Deactivate negative indexing. 
cdef np.ndarray[np.float64_t, ndim=1] SLOPE_prox(np.ndarray[np.float64_t, ndim=1] y,
                    np.ndarray[np.float64_t, ndim=1] lams):
    '''
    Returns 1D SLOPE prox. Note that this function assumes that the input y is nonnegative.
    This is true when y is a column norm (as we have it here) but not in general.
    '''
    
    #declare variables
    cdef:
        unsigned long i,j,k,n
        double d
        np.ndarray[np.float64_t, ndim=1] s, w, x, sign_y
        np.ndarray[np.uint_t, ndim=1] idx_i, idx_j
        np.ndarray[np.int64_t, ndim=1] argsrt

    n = y.shape[0]
    sign_y = np.sign(y) 
    y = np.abs(y)
    argsrt = np.argsort(np.multiply(-1.0, y)) #argsort sorts in increasing order, so we negate
    y = y[argsrt]    

    #allocate memory as necessary
    x = np.zeros(n, np.float64)
    s = np.zeros(n, np.float64)
    w = np.zeros(n, np.float64)
    idx_i = np.zeros(n, np.uint)
    idx_j = np.zeros(n, np.uint)
    
    #actually do the computation
    k = 0
    for i in range(n): 
        idx_i[k] = i
        idx_j[k] = i
        s[k] = y[i] - lams[i]
        w[k] = s[k]
        while k > 0 and (w[k-1] <= w[k]):
            k -= 1
            idx_j[k] = i
            s[k] += s[k+1]
            w[k] = s[k] / (i - idx_i[k] + 1)
        k += 1
    for j in range(k):
        d = w[j]
        if d < 0:
            d = 0
        for i in range(idx_i[j], idx_j[j] + 1):
            x[i] = d

    np.put(x, argsrt, x)
    x *= sign_y
    return x

@cython.cdivision(True) #divide w/ c not python
@cython.boundscheck(False)  # Deactivate bounds checking
@cython.wraparound(False)   # Deactivate negative indexing. 
cpdef alternating_min(np.ndarray[np.cdouble_t, ndim=2] X,
                      np.ndarray[np.float64_t, ndim=1] lams, int d, 
                      int max_itr=50,
                      float stop_tol=1e-6):
    cdef:
        np.ndarray[np.cdouble_t, ndim=2] Delta = np.zeros((X.shape[0], X.shape[1]), complex)
        np.ndarray[np.cdouble_t, ndim=2] P_A = np.eye(X.shape[0], dtype=complex)
        np.ndarray[np.cdouble_t, ndim=2] I_minus_P_A = np.eye(X.shape[0], dtype=complex)
        np.ndarray[np.cdouble_t, ndim=2] I_minus_PA_times_X 
        #np.ndarray[np.cdouble_t, ndim=2] I_minus_P_A_old =  np.eye(X.shape[0], dtype=complex)

        float I_minus_P_A_step = 1.0
        float c_step = 1.0
        np.ndarray[np.float64_t, ndim=1] c = np.zeros(X.shape[1])
        np.ndarray[np.float64_t, ndim=1] new_c, I_minus_PA_times_X_norms
        np.ndarray[np.cdouble_t, ndim=2] u
        int itr
        
    itr = 0
    for itr in range(max_itr):
        #I commented out the print statements for speed
        #print("=========================================")
        #print(f"Iteration {itr}")

        #min over A
        _, u = scipy.linalg.eigh(np.matmul((X - Delta), (X - Delta).conj().T), 
                             subset_by_index=(X.shape[0]-d, X.shape[0]-1),
                                 check_finite=False,
                                overwrite_a=True)
        #u, _, _ = scipy.linalg.svd(X-Delta, full_matrices=False, overwrite_a=True, check_finite=False, lapack_driver='gesvd')
        #I_minus_P_A = np.matmul(u[:,d:], u[:,d:].conj().T)
        P_A = np.matmul(u[:,:d], u[:,:d].conj().T)
        I_minus_P_A_step = np.linalg.norm(P_A + I_minus_P_A - np.eye(u.shape[0], dtype=complex)) #the algebra works here
        #I_minus_P_A_step = np.linalg.norm(I_minus_P_A - I_minus_P_A_old) #the algebra works here
        #print(f"I_minus_P_A step: {I_minus_P_A_step}") 
        I_minus_P_A = np.eye(u.shape[0], dtype=complex) - P_A

        #min over Delta
        I_minus_PA_times_X = np.matmul(I_minus_P_A, X)
        I_minus_PA_times_X_norms = np.linalg.norm(I_minus_PA_times_X, axis=0)
        new_c = SLOPE_prox(I_minus_PA_times_X_norms, lams)
        c_step = np.linalg.norm(c - new_c)
        #print(f"c step: {c_step}")
        c = new_c
        Delta = np.multiply(I_minus_PA_times_X, np.divide(c, I_minus_PA_times_X_norms))

        if c_step < stop_tol and I_minus_P_A_step < stop_tol:
            #print("Stopping tolerance reached")
            break
            
        #I_minus_P_A_old = I_minus_P_A.copy()
    return Delta, P_A
In [4]:
runtimes = []

Random Interference¶

In [5]:
np.random.seed(0)
num_channels = 50
d = 1
delta = 1 #array spacing
lam = 4 #signal wavelength
theta = np.pi/4 #DOA
a = np.exp(2j*np.arange(0,num_channels)*np.pi*delta*np.sin(theta)/lam) #The a vector from the review paper
t = np.linspace(0,10,100000) #time
f = 300 #frequency
s = 1
in_phase = np.sin(2*np.pi*f*t)
quadrature = np.cos(2*np.pi*f*t)
eps_sigma = .5
eps = np.random.normal(0, eps_sigma, size=(a.shape[0], len(t))) + 1j*np.random.normal(0, eps_sigma, size=(a.shape[0], len(t))) #noise term
prop_perturbed = .33
r_support = np.random.choice([True, False], size=len(t), replace=True, p = [prop_perturbed, 1-prop_perturbed])
r_sigma = 1.0
r_vals = r_support*(np.random.normal(0, r_sigma, size=(a.shape[0], len(t))) + 1j*np.random.normal(0, r_sigma, size=(a.shape[0], len(t))))
x = a[:,np.newaxis]*s + r_vals + eps
In [6]:
T = 100000
q = .1 
lams = generate_lambdas.lambda_mean(q, eps_sigma, T, 50, d)
start = time.time()
Delta, P_A = alternating_min(x[:50,:T], lams, d)
runtimes.append(time.time() - start)
#alpha = 0
In [7]:
cf_mat = confusion_matrix(r_support[:T], np.abs(Delta).sum(axis=0) > 1e-3, labels=[False, True])
df = pd.DataFrame(cf_mat, index=["No Interference", "Interference"], columns=['Guessed No Interference', 'Guessed Interference'])
print(df)
                 Guessed No Interference  Guessed Interference
No Interference                    64726                  2326
Interference                           0                 32948
In [8]:
T = 100000
u, s, vh = np.linalg.svd(x[:,:T][:,np.abs(Delta).sum(axis=0) <= 1e-3], full_matrices=False)
 #estimate of DOA
np.arcsin(lam*np.angle(u[1,0]/u[0,0])/(2*delta*np.pi))/np.pi # pi*this is estimate of DOA
Out[8]:
0.24973276989626386
In [9]:
np.random.seed(0)
num_channels = 50
d = 1
delta = 1 #array spacing
lam = 4 #signal wavelength
theta = np.pi/4 #DOA
a = np.exp(2j*np.arange(0,num_channels)*np.pi*delta*np.sin(theta)/lam) #The a vector from the review paper
t = np.linspace(0,10,100000) #time
f = 300 #frequency
s = 1
in_phase = np.sin(2*np.pi*f*t)
quadrature = np.cos(2*np.pi*f*t)
eps_sigma = .5
eps = np.random.normal(0, eps_sigma, size=(a.shape[0], len(t))) + 1j*np.random.normal(0, eps_sigma, size=(a.shape[0], len(t))) #noise term
prop_perturbed = .99
r_support = np.random.choice([True, False], size=len(t), replace=True, p = [prop_perturbed, 1-prop_perturbed])
r_sigma = .5
r_vals = r_support*(np.random.normal(0, r_sigma, size=(a.shape[0], len(t))) + 1j*np.random.normal(0, r_sigma, size=(a.shape[0], len(t))))
x = a[:,np.newaxis]*s + r_vals + eps
In [10]:
np.random.seed(0)
UR = np.array([])
UL = np.array([])
LR = np.array([])
LL = np.array([])
ps =  np.linspace(.1, .9, 25)
for p in ps:
    num_channels = 50
    d = 1
    delta = 1 #array spacing
    lam = 4 #signal wavelength
    theta = np.pi/4 #DOA
    a = np.exp(2j*np.arange(0,num_channels)*np.pi*delta*np.sin(theta)/lam) #The a vector from the review paper
    t = np.linspace(0,10,100000) #time
    f = 300 #frequency
    s = 1
    in_phase = np.sin(2*np.pi*f*t)
    quadrature = np.cos(2*np.pi*f*t)
    eps_sigma = .5
    eps = np.random.normal(0, eps_sigma, size=(a.shape[0], len(t))) + 1j*np.random.normal(0, eps_sigma, size=(a.shape[0], len(t))) #noise term
    prop_perturbed = p
    r_support = np.random.choice([True, False], size=len(t), replace=True, p = [prop_perturbed, 1-prop_perturbed])
    r_sigma = 1.0
    r_vals = r_support*(np.random.normal(0, r_sigma, size=(a.shape[0], len(t))) + 1j*np.random.normal(0, r_sigma, size=(a.shape[0], len(t))))
    x = a[:,np.newaxis]*s + r_vals + eps
    T = 100000
    q = .1
    lams = generate_lambdas.lambda_mean(q, eps_sigma, T, 50, d)
    start = time.time()
    Delta, P_A = alternating_min(x[:50,:T], lams, d)
    runtimes.append(time.time() - start)
    cf_mat = confusion_matrix(r_support[:T], np.abs(Delta).sum(axis=0) > 1e-3, labels=[False, True])
    UL = np.append(UL,cf_mat[0,0])
    UR = np.append(UR,cf_mat[0,1])
    LL = np.append(LL,cf_mat[1,0])
    LR = np.append(LR,cf_mat[1,1])
FNR = np.divide(LL,(LL+UL))
FPR = np.divide(UR,(UR+LR))
In [11]:
plt.figure(figsize=(10, 3))
plt.plot(ps, FPR, '--', label=r'FPR')
plt.plot(ps, FNR, label=r'FNR')
plt.legend()
plt.xlabel(r'$p$')
plt.ylabel(r'Rate')
plt.title(r"False Positive and False Negative Rates for Varying $p$")
plt.savefig('varying_p_randintf.pdf', bbox_inches='tight', pad_inches=0)
plt.show()
In [12]:
np.random.seed(0)
UR = np.array([])
UL = np.array([])
LR = np.array([])
LL = np.array([])
sigma_rs=  np.linspace(.5, 10, 25)
for sigma_r in sigma_rs: 
    num_channels = 50
    d = 1
    delta = 1 #array spacing
    lam = 4 #signal wavelength
    theta = np.pi/4 #DOA
    a = np.exp(2j*np.arange(0,num_channels)*np.pi*delta*np.sin(theta)/lam) #The a vector from the review paper
    t = np.linspace(0,10,100000) #time
    f = 300 #frequency
    s = 1
    in_phase = np.sin(2*np.pi*f*t)
    quadrature = np.cos(2*np.pi*f*t)
    eps_sigma = .5
    eps = np.random.normal(0, eps_sigma, size=(a.shape[0], len(t))) + 1j*np.random.normal(0, eps_sigma, size=(a.shape[0], len(t))) #noise term
    prop_perturbed = .33
    r_support = np.random.choice([True, False], size=len(t), replace=True, p = [prop_perturbed, 1-prop_perturbed])
    r_sigma = sigma_r
    r_vals = r_support*(np.random.normal(0, r_sigma, size=(a.shape[0], len(t))) + 1j*np.random.normal(0, r_sigma, size=(a.shape[0], len(t))))
    x = a[:,np.newaxis]*s + r_vals + eps
    T = 100000
    q = .1
    lams = generate_lambdas.lambda_mean(q, eps_sigma, T, 50, d)
    start = time.time()
    Delta, P_A = alternating_min(x[:50,:T], lams, d)
    runtimes.append(time.time() - start)
    cf_mat = confusion_matrix(r_support[:T], np.abs(Delta).sum(axis=0) > 1e-3, labels=[False, True])
    UL = np.append(UL,cf_mat[0,0])
    UR = np.append(UR,cf_mat[0,1])
    LL = np.append(LL,cf_mat[1,0])
    LR = np.append(LR,cf_mat[1,1])
FNR = np.divide(LL,(LL+UL))
FPR = np.divide(UR,(UR+LR))
In [13]:
plt.figure(figsize=(10, 3))
#multiply by sqrt(2) to convert from complex normal to normal variance
#See remark after Corollary 1 in the paper
plt.plot(np.sqrt(2)*sigma_rs, FPR, '--', label='FPR')
plt.plot(np.sqrt(2)*sigma_rs, FNR, label='FNR')
plt.legend()
plt.xlabel(r'$\sigma_{\delta}$')
plt.ylabel('Rate')
plt.title(r"False Positive and False Negative Rates for Varying $\sigma_{\delta}$")
plt.savefig('varying_sigma_delta_randintf.pdf', bbox_inches='tight', pad_inches=0)
plt.show()
In [14]:
T = 100000
u, s, vh = np.linalg.svd(x[:,:T][:,np.abs(Delta).sum(axis=0) <= 1e-3], full_matrices=False)
 #estimate of DOA
np.arcsin(lam*np.angle(u[1,0]/u[0,0])/(2*delta*np.pi))/np.pi # pi*this is estimate of DOA
Out[14]:
0.2499608535398365
In [15]:
u, s, vh = np.linalg.svd(x[:,:T][:,np.abs(Delta[:,:T]).sum(axis=0) <= 1e-3], full_matrices=False)
 #estimate of DOA
np.arcsin(lam*np.angle(u[1,0]/u[0,0])/(2*delta*np.pi))/np.pi # pi*this is estimate of DOA
Out[15]:
0.2499608535398365

Directed Interference, w/ Gaussian random amplitude¶

In [16]:
def dir_ran_intf(num_channels, delta, lam, theta, s, eps_sigma,r_sigma, prob_perturbed, theta_r):
    a = np.exp(2j*np.arange(0,num_channels)*np.pi*delta*np.sin(theta)/lam) #The a vector from the review paper
    t = np.linspace(0,10,100000) #time
    f = 300 #frequency
    in_phase = np.sin(2*np.pi*f*t)
    quadrature = np.cos(2*np.pi*f*t)
    eps = np.random.normal(0, eps_sigma, size=(a.shape[0], len(t))) + 1j*np.random.normal(0, eps_sigma, size=(a.shape[0], len(t))) #noise term
    a_r = np.exp(2j*np.arange(0,num_channels)*np.pi*delta*np.sin(theta_r)/lam) #The a vector from the review paper
    r_support = np.random.choice([True, False], size=len(t), replace=True, p = [prob_perturbed, 1-prob_perturbed])
    r_vals = r_support*np.abs(np.random.normal(0, r_sigma, size=(len(t))))
    x = a[:,np.newaxis]*s + a_r[:,np.newaxis]*r_vals + eps
    return x, r_support
In [17]:
#The actual experiment
np.random.seed(0)
num_channels = 50
d = 1
delta = 1 #array spacing
lam = 4 #signal wavelength
theta = np.pi/4 #DOA
eps_sigma = .5
prob_perturbed = .1
s = 1
T = 100000

q = .1
theta_r = np.pi/2
lams = generate_lambdas.lambda_mean(q, eps_sigma, T, 50, d)
r_sigma = 1
x, r_support = dir_ran_intf(num_channels, delta, lam, theta, s, eps_sigma,r_sigma, prob_perturbed, theta_r)
In [18]:
T = 100000
start = time.time()
Delta, P_A = alternating_min(x[:,:T], lams, d)
runtimes.append(time.time() - start)
cf_mat = confusion_matrix(r_support[:T], np.abs(Delta).sum(axis=0) > 1e-3, labels=[False, True])
df = pd.DataFrame(cf_mat, index=["No Interference", "Interference"], columns=['Guessed No Interference', 'Guessed Interference'])
In [19]:
#using all the data
T = 100000
u, s, vh = np.linalg.svd(x[:,:T], full_matrices=False)
 #estimate of DOA
np.arcsin(lam*np.angle(u[1,0]/u[0,0])/(2*delta*np.pi))/np.pi # pi*this is estimate of DOA
Out[19]:
0.2596681558817325
In [20]:
#discarding samples identified as interference
T = 100000
u, s, vh = np.linalg.svd(x[:,:T][:,np.abs(Delta).sum(axis=0) <= 1e-3], full_matrices=False)
 #estimate of DOA
np.arcsin(lam*np.angle(u[1,0]/u[0,0])/(2*delta*np.pi))/np.pi # pi*this is estimate of DOA
Out[20]:
0.25062641510925343
In [21]:
np.random.seed(0)
num_channels = 50
d = 1
delta = 1 #array spacing
lam = 4 #signal wavelength
theta = np.pi/4 #DOA
eps_sigma = .5
prob_perturbed = .1
s = 1
T = 100000

q = .1
theta_r = np.pi/2
lams = generate_lambdas.lambda_mean(q, eps_sigma, T, 50, d)
r_sigma = 1.0

x, r_support = dir_ran_intf(num_channels, delta, lam, theta, s, eps_sigma,r_sigma, prob_perturbed, theta_r)
start = time.time()
Delta, P_A = alternating_min(x[:,:T], lams, d)
runtimes.append(time.time() - start)
cf_mat = confusion_matrix(r_support[:T], np.abs(Delta).sum(axis=0) > 1e-3, labels=[False, True])
df = pd.DataFrame(cf_mat, index=["No Interference", "Interference"], columns=['Guessed No Interference', 'Guessed Interference'])
print(df)
                 Guessed No Interference  Guessed Interference
No Interference                    89349                   729
Interference                        3793                  6129
In [22]:
T = 100000
u, s, vh = np.linalg.svd(x[:,:T][:,np.abs(Delta).sum(axis=0) <= 1e-3], full_matrices=False)
 #estimate of DOA
np.arcsin(lam*np.angle(u[1,0]/u[0,0])/(2*delta*np.pi))/np.pi # pi*this is estimate of DOA
Out[22]:
0.25062641510925343
In [23]:
T = 100000
u, s, vh = np.linalg.svd(x[:,:T], full_matrices=False)
 #estimate of DOA
np.arcsin(lam*np.angle(u[1,0]/u[0,0])/(2*delta*np.pi))/np.pi # pi*this is estimate of DOA
Out[23]:
0.2596681558817325
In [24]:
np.random.seed(0)
num_channels = 50
d = 1
delta = 1 #array spacing
lam = 4 #signal wavelength
theta = np.pi/4 #DOA
eps_sigma = .5
ps = np.linspace(.01, .5, 25)
s = 1
T = 100000

q = .1
theta_r = np.pi/2
lams = generate_lambdas.lambda_mean(q, eps_sigma, T, 50, d)
r_sigma = 1
UR = np.array([])
UL = np.array([])
LR = np.array([])
LL = np.array([])
for p in ps:
    x, r_support = dir_ran_intf(num_channels, delta, lam, theta, s, eps_sigma,r_sigma, p, theta_r)
    start = time.time()
    Delta, P_A = alternating_min(x[:50,:T], lams, d)
    runtimes.append(time.time() - start)
    cf_mat = confusion_matrix(r_support[:T], np.abs(Delta).sum(axis=0) > 1e-3, labels=[False, True])
    UL = np.append(UL,cf_mat[0,0])
    UR = np.append(UR,cf_mat[0,1])
    LL = np.append(LL,cf_mat[1,0])
    LR = np.append(LR,cf_mat[1,1])
FNR = np.divide(LL,(LL+UL))
FPR = np.divide(UR,(UR+LR))
Prec = np.divide(UL,(UL+LL))
In [25]:
plt.figure(figsize=(10, 3))
plt.plot(ps, FPR, '--', label=r'FPR')
plt.plot(ps, FNR, label=r'FNR')
plt.legend()
plt.xlabel(r'$p$')
plt.ylabel(r'Rate')
plt.title(r"False Positive and False Negative Rates for Varying $p$")
plt.savefig('varying_p_delta_randamp.pdf', bbox_inches='tight', pad_inches=0)
plt.show()
In [26]:
np.random.seed(0)
num_channels = 50
d = 1
delta = 1 #array spacing
lam = 4 #signal wavelength
theta = np.pi/4 #DOA
eps_sigma = .5
p = .1
s = 1
T = 100000

q = .1
theta_r = np.pi/2
lams = generate_lambdas.lambda_mean(q, eps_sigma, T, 50, d)
r_sigmas = np.linspace(.5, 4, 25)
UR = np.array([])
UL = np.array([])
LR = np.array([])
LL = np.array([])
for r_sigma in r_sigmas:
    x, r_support = dir_ran_intf(num_channels, delta, lam, theta, s, eps_sigma,r_sigma, p, theta_r)
    start = time.time()
    Delta, P_A = alternating_min(x[:,:T], lams, d)
    runtimes.append(time.time() - start)
    cf_mat = confusion_matrix(r_support[:T], np.abs(Delta).sum(axis=0) > 1e-3, labels=[False, True])
    UL = np.append(UL,cf_mat[0,0])
    UR = np.append(UR,cf_mat[0,1])
    LL = np.append(LL,cf_mat[1,0])
    LR = np.append(LR,cf_mat[1,1])
FNR = np.divide(LL,(LL+UL))
FPR = np.divide(UR,(UR+LR))
Prec = np.divide(UL,(UL+LL))
                      
In [27]:
plt.figure(figsize=(10, 3))
plt.plot(np.sqrt(2)*r_sigmas, FPR, '--', label='FPR')
plt.plot(np.sqrt(2)*r_sigmas, FNR, label='FNR')
plt.legend()
plt.xlabel(r'$\sigma_{\delta}$')
plt.ylabel('Rate')
plt.title(r"False Positive and False Negative Rates for Varying $\sigma_{\delta}$")
plt.savefig('varying_sigma_delta_randamp.pdf', bbox_inches='tight', pad_inches=0)
plt.show()

Directed Interference, w/ constant amplitude¶

In [28]:
def dir_intf(num_channels, delta, lam, theta, s, eps_sigma, prob_perturbed, theta_r, int_s):
    a = np.exp(2j*np.arange(0,num_channels)*np.pi*delta*np.sin(theta)/lam) #The a vector from the review paper
    t = np.linspace(0,10,100000) #time
    f = 300 #frequency
    in_phase = np.sin(2*np.pi*f*t)
    quadrature = np.cos(2*np.pi*f*t)
    eps = np.random.normal(0, eps_sigma, size=(a.shape[0], len(t))) + 1j*np.random.normal(0, eps_sigma, size=(a.shape[0], len(t))) #noise term
    a_r = np.exp(2j*np.arange(0,num_channels)*np.pi*delta*np.sin(theta_r)/lam) #The a vector from the review paper
    r_support = np.random.choice([True, False], size=len(t), replace=True, p = [prob_perturbed, 1-prob_perturbed])
    r_vals = r_support*int_s
    x = a[:,np.newaxis]*s + a_r[:,np.newaxis]*r_vals + eps
    return x, r_support
In [29]:
np.random.seed(0)
#the actual experiment
num_channels = 50
delta = 1 #array spacing
lam = 4 #signal wavelength
theta = np.pi/4 #DOA
eps_sigma = .5
ps = np.linspace(.01, .3, 25)
s = 1
T = 100000
int_s = 1 
q = .1
theta_r = np.pi/2
lams = generate_lambdas.lambda_mean(q, eps_sigma, T, 50, d)
UR = np.array([])
UL = np.array([])
LR = np.array([])
LL = np.array([])
for p in ps:
    x, r_support = dir_intf(num_channels, delta, lam, theta, s, eps_sigma, p, theta_r, int_s)
    start = time.time()
    Delta, P_A = alternating_min(x[:,:T], lams, d)
    runtimes.append(time.time() - start)
    cf_mat = confusion_matrix(r_support[:T], np.abs(Delta).sum(axis=0) > 1e-3, labels=[False, True])
    UL = np.append(UL,cf_mat[0,0])
    UR = np.append(UR,cf_mat[0,1])
    LL = np.append(LL,cf_mat[1,0])
    LR = np.append(LR,cf_mat[1,1])
FNR = np.divide(LL,(LL+UL))
FPR = np.divide(UR,(UR+LR))
Prec = np.divide(UL,(UL+LL))
In [30]:
plt.figure(figsize=(10, 3))
plt.plot(ps, FPR, '--', label=r'FPR')
plt.plot(ps, FNR, label=r'FNR')
plt.legend()
plt.xlabel(r'$p$')
plt.ylabel(r'Rate')
plt.title(r"False Positive and False Negative Rates for Varying $p$")
plt.savefig('varying_p_dir_intf.pdf', bbox_inches='tight', pad_inches=0)
plt.show()
In [31]:
np.random.seed(0)
#the actual experiment
num_channels = 50
delta = 1 #array spacing
lam = 4 #signal wavelength
theta = np.pi/4 #DOA
eps_sigma = .5
p = .1
s = 1
T = 100000
int_S = np.linspace(.5, 2.0, 25)
q = .1
theta_r = np.pi/2
lams = generate_lambdas.lambda_mean(q, eps_sigma, T, 50, d)
UR = np.array([])
UL = np.array([])
LR = np.array([])
LL = np.array([])
for int_s in int_S:
    x, r_support = dir_intf(num_channels, delta, lam, theta, s, eps_sigma, p, theta_r, int_s)
    start = time.time()
    Delta, P_A = alternating_min(x[:50,:T], lams, d)
    runtimes.append(time.time() - start)
    cf_mat = confusion_matrix(r_support[:T], np.abs(Delta).sum(axis=0) > 1e-3, labels=[False, True])
    UL = np.append(UL,cf_mat[0,0])
    UR = np.append(UR,cf_mat[0,1])
    LL = np.append(LL,cf_mat[1,0])
    LR = np.append(LR,cf_mat[1,1])
FNR = np.divide(LL,(LL+UL))
FPR = np.divide(UR,(UR+LR))
Prec = np.divide(UL,(UL+LL))
In [32]:
plt.figure(figsize=(10, 3))
plt.plot(np.sqrt(2)*r_sigmas, FPR, '--', label='FPR')
plt.plot(np.sqrt(2)*r_sigmas, FNR, label='FNR')
plt.legend()
plt.xlabel(r'$\sigma_{\delta}$')
plt.ylabel('Rate')
plt.title(r"False Positive and False Negative Rates for Varying $\sigma_{\delta}$")
plt.savefig('varying_sigma_delta_dir_intf.pdf', bbox_inches='tight', pad_inches=0)
plt.show()
In [33]:
np.random.seed(0)
#the actual experiment
num_channels = 50
delta = 1 #array spacing
lam = 4 #signal wavelength
theta = np.pi/4 #DOA
eps_sigma = .5
prob_perturbed = .1
s = 1
T = 100000
int_s = 1
q = .1
theta_r = np.pi/2
lams = generate_lambdas.lambda_mean(q, eps_sigma, T, 50, d)
x, r_support = dir_intf(num_channels, delta, lam, theta, s, eps_sigma, prob_perturbed, theta_r, int_s)
In [34]:
T = 100000
start = time.time()
Delta, P_A = alternating_min(x[:,:T], lams, d)
runtimes.append(time.time() - start)
cf_mat = confusion_matrix(r_support[:T], np.abs(Delta).sum(axis=0) > 1e-3, labels=[False, True])
df = pd.DataFrame(cf_mat, index=["No Interference", "Interference"], columns=['Guessed No Interference', 'Guessed Interference'])
print(df)
                 Guessed No Interference  Guessed Interference
No Interference                    88768                  1310
Interference                           0                  9922
In [35]:
T = 100000
u, s, vh = np.linalg.svd(x[:,:T], full_matrices=False)
 #estimate of DOA
np.arcsin(lam*np.angle(u[1,0]/u[0,0])/(2*delta*np.pi))/np.pi # pi*this is estimate of DOA
Out[35]:
0.2622832252228018
In [36]:
T = 100000
u, s, vh = np.linalg.svd(x[:,:T][:,np.abs(Delta).sum(axis=0) <= 1e-3], full_matrices=False)
 #estimate of DOA
np.arcsin(lam*np.angle(u[1,0]/u[0,0])/(2*delta*np.pi))/np.pi # pi*this is estimate of DOA
Out[36]:
0.2494057676043575
In [37]:
plt.figure(figsize=(10,3))
sns.histplot(runtimes)
plt.title(r"Histogram of run times")
plt.xlabel(r'Run time (s)')
plt.savefig('runtimes.pdf', bbox_inches='tight', pad_inches=0)
plt.show()
In [40]:
print(len(runtimes))
154
In [41]:
max(runtimes)
Out[41]:
5.915277004241943
In [ ]: