Commit ea99461

code aesthetics
fabian-sp committed Dec 3, 2024
1 parent aacbf98 commit ea99461
Showing 6 changed files with 451 additions and 252 deletions.
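
Most of this commit applies PEP 8's spacing rule for keyword arguments and default values (pycodestyle code E251: no spaces around = in that position) and deletes commented-out code. A minimal illustration of the rule, separate from the gglasso code itself:

    # before: flagged by pycodestyle as E251
    def f(x = 1, scale = False):
        return x
    f(x = 2, scale = True)

    # after: PEP 8 compliant
    def f(x=1, scale=False):
        return x
    f(x=2, scale=True)
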
48 changes: 20 additions & 28 deletions src/gglasso/helper/data_generation.py
@@ -11,8 +11,7 @@

from .basic_linalg import trp


-def generate_precision_matrix(p=100, M=10, style = 'powerlaw', gamma = 2.8, prob = 0.1, scale = False, seed = None):
+def generate_precision_matrix(p=100, M=10, style='powerlaw', gamma=2.8, prob=0.1, scale=False, seed=None):
"""
Generates a sparse precision matrix with associated covariance matrix from a random network.
@@ -52,8 +51,8 @@ def generate_precision_matrix(p=100, M=10, style = 'powerlaw', gamma = 2.8, prob
L = int(p/M)
assert M*L == p

-A = np.zeros((p,p))
-Sigma = np.zeros((p,p))
+A = np.zeros((p, p))
+Sigma = np.zeros((p, p))

if seed is not None:
nxseed = seed
@@ -66,9 +65,9 @@ def generate_precision_matrix(p=100, M=10, style = 'powerlaw', gamma = 2.8, prob
nxseed = int(nxseed +m)

if style == 'powerlaw':
-G_m = nx.generators.random_graphs.random_powerlaw_tree(n=L, gamma=gamma, tries=max(5*p,1000), seed = nxseed)
+G_m = nx.generators.random_graphs.random_powerlaw_tree(n=L, gamma=gamma, tries=max(5*p,1000), seed=nxseed)
elif style == 'erdos':
-G_m = nx.generators.random_graphs.erdos_renyi_graph(n=L , p=prob, seed=nxseed, directed=False)
+G_m = nx.generators.random_graphs.erdos_renyi_graph(n=L, p=prob, seed=nxseed, directed=False)
else:
raise ValueError(f"{style} is not a valid choice for the network generation.")
A_m = nx.to_numpy_array(G_m)
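
The two style choices above map directly to networkx generators. A minimal standalone sketch of building one block's adjacency matrix (illustrative values for L, gamma, and prob; not the library's code):

    import networkx as nx

    L, gamma, prob = 10, 2.8, 0.1
    G_pl = nx.random_powerlaw_tree(n=L, gamma=gamma, tries=1000, seed=42)  # style='powerlaw'
    G_er = nx.erdos_renyi_graph(n=L, p=prob, seed=42)                      # style='erdos'
    A_m = nx.to_numpy_array(G_pl)  # (L, L) 0/1 adjacency block
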
@@ -79,15 +78,11 @@ def generate_precision_matrix(p=100, M=10, style = 'powerlaw', gamma = 2.8, prob
else:
rng = np.random.default_rng(np.random.randint(low=11111, high=99999))

-B1 = rng.uniform(low = .1, high = .4, size = (L,L))
-B2 = rng.choice(a = [-1,1], p=[.5, .5], size = (L,L))
+B1 = rng.uniform(low=.1, high=.4, size=(L,L))
+B2 = rng.choice(a=[-1,1], p=[.5, .5], size=(L,L))

A_m = A_m * (B1*B2)

-# only use upper triangle and symmetrize
-#A_m = np.triu(A_m)
-#A_m = .5 * (A_m + A_m.T)

A[m*L:(m+1)*L, m*L:(m+1)*L] = A_m

row_sum_od = 1.5 * abs(A).sum(axis = 1) + 1e-10
@@ -105,15 +100,12 @@ def generate_precision_matrix(p=100, M=10, style = 'powerlaw', gamma = 2.8, prob
if D.min() < 1e-8:
A += (0.1+abs(D.min())) * np.eye(p)

-#D = np.linalg.eigvalsh(A)
-#assert D.min() > 0, f"generated matrix A is not positive definite, min EV is {D.min()}"

-Ainv = np.linalg.pinv(A, hermitian = True)
+Ainv = np.linalg.pinv(A, hermitian=True)

# scale by inverse of diagonal and 0.6*1/sqrt(d_ii*d_jj) on off-diag
if scale:
d = np.diag(Ainv)
-scale_mat = np.tile(np.sqrt(d),(Ainv.shape[0],1))
+scale_mat = np.tile(np.sqrt(d),(Ainv.shape[0], 1))
scale_mat = (1/0.6)*(scale_mat.T * scale_mat)
np.fill_diagonal(scale_mat, d)
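
The scale branch above builds scale_mat with entries (1/0.6)*sqrt(d_i*d_j) off the diagonal and d_i on it; dividing Ainv elementwise by such a matrix (the division itself falls outside the displayed hunk) gives unit diagonal and off-diagonal entries 0.6*Ainv_ij/sqrt(d_i*d_j), matching the comment. A standalone sketch of that rescaling for a symmetric positive definite C, not the library's exact code:

    import numpy as np

    def scale_to_unit_diagonal(C, factor=0.6):
        # Rescale C to unit diagonal; shrink off-diagonal entries by `factor`.
        d = np.diag(C)
        scale_mat = np.outer(np.sqrt(d), np.sqrt(d)) / factor  # (1/factor)*sqrt(d_i*d_j)
        np.fill_diagonal(scale_mat, d)                         # diagonal: divide by d_i
        return C / scale_mat
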

Expand Down Expand Up @@ -144,7 +136,7 @@ def time_varying_power_network(p=100, K=10, M=10, scale = False, seed = None):
assert M*L == p
assert M >=3

-Sigma_0,_ = generate_precision_matrix(p = p, M = M, style = 'powerlaw', scale = scale, seed = seed)
+Sigma_0,_ = generate_precision_matrix(p=p, M=M, style='powerlaw', scale=scale, seed=seed)

for k in np.arange(K):
Sigma_k = Sigma_0.copy()
@@ -156,10 +148,10 @@ def time_varying_power_network(p=100, K=10, M=10, scale = False, seed = None):

Sigma[k,:,:] = Sigma_k

-Theta = np.linalg.pinv(Sigma, hermitian = True)
+Theta = np.linalg.pinv(Sigma, hermitian=True)

decay = np.exp(-.5 * np.arange(K))
-helper = np.ones((K,L,L)) * decay[:,None,None]
+helper = np.ones((K, L, L)) * decay[:, None, None]
for k in np.arange(K):
np.fill_diagonal(helper[k,:,:], 1)

@@ -169,7 +161,7 @@ def time_varying_power_network(p=100, K=10, M=10, scale = False, seed = None):

return Sigma, Theta

-def group_power_network(p=100, K=10, M=10, scale = False, seed = None):
+def group_power_network(p=100, K=10, M=10, scale=False, seed=None):
"""
generates a power law network. In each single network one block disappears (randomly)
p: dimension
@@ -181,14 +173,14 @@ def group_power_network(p=100, K=10, M=10, scale = False, seed = None):
L = int(p/M)
assert M*L == p

-Sigma_0,_ = generate_precision_matrix(p = p, M = M, style = 'powerlaw', scale = scale, seed = seed)
+Sigma_0,_ = generate_precision_matrix(p=p, M=M, style='powerlaw', scale=scale, seed=seed)
# contains the number of the block disappearing for each k=1,..,K
if seed is not None:
rng = np.random.default_rng(seed)
else:
rng = np.random.default_rng(np.random.randint(low=11111, high=99999))

-block = rng.integers(M, size = K)
+block = rng.integers(M, size=K)

for k in np.arange(K):
Sigma_k = Sigma_0.copy()
@@ -197,7 +189,7 @@ def group_power_network(p=100, K=10, M=10, scale = False, seed = None):

Sigma[k,:,:] = Sigma_k

-Theta = np.linalg.pinv(Sigma, hermitian = True)
+Theta = np.linalg.pinv(Sigma, hermitian=True)
Sigma, Theta = ensure_sparsity(Sigma, Theta)

return Sigma, Theta
@@ -209,12 +201,12 @@ def ensure_sparsity(Sigma, Theta):
D = np.linalg.eigvalsh(Theta)
assert D.min() > 0, "generated matrix Theta is not positive definite"

-Sigma = np.linalg.pinv(Theta, hermitian = True)
+Sigma = np.linalg.pinv(Theta, hermitian=True)

return Sigma, Theta


-def sample_covariance_matrix(Sigma, N, seed = None):
+def sample_covariance_matrix(Sigma, N, seed=None):
"""
samples data for a given covariance matrix Sigma (with K layers)
return: sample covariance matrix S
@@ -226,7 +218,7 @@ def sample_covariance_matrix(Sigma, N, seed = None):
(p,p) = Sigma.shape

sample = rng.multivariate_normal(np.zeros(p), Sigma, N).T
-S = np.cov(sample, bias = True)
+S = np.cov(sample, bias=True)

else:
assert abs(Sigma - trp(Sigma)).max() <= 1e-10
@@ -239,7 +231,7 @@ def sample_covariance_matrix(Sigma, N, seed = None):
S = np.zeros((K,p,p))
for k in np.arange(K):
# normalize with N --> bias = True
-S[k,:,:] = np.cov(sample[k,:,:], bias = True)
+S[k,:,:] = np.cov(sample[k,:,:], bias=True)

return S,sample
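
The bias=True flag makes np.cov normalize by N instead of N-1, as the inline comment says. A quick self-contained check of that equivalence:

    import numpy as np

    rng = np.random.default_rng(0)
    X = rng.standard_normal((3, 100))       # p=3 variables, N=100 samples
    Xc = X - X.mean(axis=1, keepdims=True)  # center each variable
    S = np.cov(X, bias=True)                # normalized by N
    assert np.allclose(S, (Xc @ Xc.T) / X.shape[1])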

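Taken together, this module generates ground-truth covariance/precision pairs and samples data from them. A minimal usage sketch in the post-commit keyword style (hypothetical parameter values):

    from gglasso.helper.data_generation import group_power_network, sample_covariance_matrix

    p, K, M, N = 20, 5, 4, 200
    Sigma, Theta = group_power_network(p=p, K=K, M=M, scale=False, seed=1234)
    S, sample = sample_covariance_matrix(Sigma, N, seed=1234)
    print(S.shape)  # (K, p, p): one biased (1/N) sample covariance per layer
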
6 changes: 0 additions & 6 deletions src/gglasso/helper/experiment_helper.py
@@ -199,7 +199,6 @@ def plot_runtime(iA, iP, vecN, save = False):
p1 = ax.plot(iA[j]['residual'], c = color_dict['ADMM'], label = 'ADMM residual')
p2 = ax.plot(iP[j]['residual'], c = color_dict['PPDNA'], marker = 'o', markersize = 3, label = 'PPDNA residual')

-#ax.tick_params(axis='both', which='major', labelsize=7)
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_ylim(1e-6,0.2)
@@ -213,9 +212,6 @@ def plot_runtime(iA, iP, vecN, save = False):
ax.vlines(iP[j]['iter_admm']-1, 0, 0.2, 'grey')
ax.set_xlim(max(iP[j]['iter_admm'] - 5,1), )

-#ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
-#ax2.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())

if j in [0,2]:
ax.set_ylabel('KKT residual')
if j in [1,3]:
Expand Down Expand Up @@ -377,8 +373,6 @@ def single_surface_plot(L1, L2, C, ax, name = 'eBIC'):

ax.set_xlabel(r'$\lambda_1$', fontsize = 14)
ax.set_ylabel(r'$\lambda_2$', fontsize = 14)
-#ax.set_xlabel(r'$w_1$', fontsize = 14)
-#ax.set_ylabel(r'$w_2$', fontsize = 14)
ax.set_zlabel(name, fontsize = 14)
ax.view_init(elev = 18, azim = 51)

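The surviving plotting logic in plot_runtime draws KKT residuals on log-log axes with a vertical line marking the ADMM iteration count. A standalone sketch of that pattern with synthetic data, not the library's experiment output:

    import numpy as np
    import matplotlib.pyplot as plt

    residual = 0.2 * np.exp(-0.05 * np.arange(500))  # synthetic decaying residual
    fig, ax = plt.subplots()
    ax.plot(residual, label='ADMM residual')
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.set_ylim(1e-6, 0.2)
    ax.set_ylabel('KKT residual')
    ax.vlines(300, 0, 0.2, 'grey')  # e.g. the iteration where ADMM stopped
    ax.legend()
    plt.show()
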
(Diffs of the remaining 4 changed files are not shown in this view.)
