# -*- coding: utf-8 -*-
"""
Created on Sat Sep 2 18:19:20 2023
@author: Xiwen Chen
"""
import numpy as np
import math
import torch
def rd(X, epsilon2=0.5):
    """Coding-rate ("additive diversity") of the rows of X (NumPy version)."""
    # Center X first if needed, e.g. X = X - np.mean(X, axis=0).
    n, m = X.shape
    # The singular values of X and X.T are identical; pick the orientation
    # that is cheaper to decompose.
    if n > m:
        _, s, _ = np.linalg.svd(X.T)
    else:
        _, s, _ = np.linalg.svd(X)
    rate = np.sum(np.log(1 + s**2 * m / (n * epsilon2)))
    return rate
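# Minimal usage sketch (illustrative only; rows of X are assumed to be
# feature vectors, e.g. a batch of embeddings):
def _demo_rd():
    X = np.random.rand(10, 50)
    print("rd diversity:", rd(X, epsilon2=0.5))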
def rd_torch(X, epsilon2=0.5):
    """Coding-rate ("additive diversity") of the rows of X (PyTorch version).

    Unlike rd(), this version centers X before the decomposition.
    """
    X = X - torch.mean(X, dim=0)
    n, m = X.shape
    # The singular values of X and X.T are identical; pick the orientation
    # that is cheaper to decompose.
    if n > m:
        _, s, _ = torch.linalg.svd(X.T)
    else:
        _, s, _ = torch.linalg.svd(X)
    rate = torch.sum(torch.log(1 + s**2 * m / (n * epsilon2)))
    return rate
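# Sanity check, kept from the original commented-out example: the NumPy and
# Torch versions agree once the input is centered the same way (rd_torch
# centers internally, rd does not).
def _check_rd_consistency():
    X = torch.rand(10, 50)
    Xc = X - torch.mean(X, dim=0)
    print("torch:", rd_torch(X, epsilon2=0.5).item())
    print("numpy:", rd(Xc.numpy(), epsilon2=0.5))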
def dpp(kernel_matrix, max_length, epsilon=1E-10):
    """
    fast implementation of the greedy MAP algorithm for DPPs
    :param kernel_matrix: 2-d array (PSD similarity kernel)
    :param max_length: positive int
    :param epsilon: small positive scalar, stopping threshold
    :return: list of selected item indices
    """
    item_size = kernel_matrix.shape[0]
    cis = np.zeros((max_length, item_size))
    di2s = np.copy(np.diag(kernel_matrix))  # shape: (item_size,)
    selected_items = list()
    selected_item = np.argmax(di2s)
    selected_items.append(selected_item)
    while len(selected_items) < max_length:
        k = len(selected_items) - 1
        # Incremental Cholesky-style update of the conditional variances.
        ci_optimal = cis[:k, selected_item]
        di_optimal = math.sqrt(di2s[selected_item])
        elements = kernel_matrix[selected_item, :]
        eis = (elements - np.dot(ci_optimal, cis[:k, :])) / di_optimal
        cis[k, :] = eis
        di2s -= np.square(eis)
        di2s[selected_item] = -np.inf  # guard against re-selection from round-off
        selected_item = np.argmax(di2s)
        if di2s[selected_item] < epsilon:
            break
        selected_items.append(selected_item)
    return selected_items
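# Usage sketch (illustrative, not from the original file): build a PSD
# similarity kernel from unit-norm features and greedily select a diverse
# subset.
def _demo_dpp(num_items=100, dim=16, max_length=10):
    feats = np.random.randn(num_items, dim)
    feats /= np.linalg.norm(feats, axis=1, keepdims=True)
    kernel = feats @ feats.T  # Gram matrix, PSD by construction
    return dpp(kernel, max_length)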
def rd_add(X_candidate, X_previous, epsilon2=0.5):
    """Coding-rate diversity of the candidate and previous samples combined."""
    X = np.concatenate((X_candidate, X_previous), axis=0)
    n, m = X.shape
    X = X - np.mean(X, axis=0)
    # The singular values of X and X.T are identical; pick the orientation
    # that is cheaper to decompose.
    if n > m:
        _, s, _ = np.linalg.svd(X.T)
    else:
        _, s, _ = np.linalg.svd(X)
    rate = np.sum(np.log(1 + s**2 * m / (n * epsilon2)))
    return rate
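# Usage sketch (assumed workflow): score how much a candidate batch adds to
# an already-selected pool by evaluating their joint diversity.
def _demo_rd_add():
    X_prev = np.random.rand(20, 50)
    X_cand = np.random.rand(5, 50)
    print("joint diversity:", rd_add(X_cand, X_prev, epsilon2=0.5))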