This repository has been archived by the owner on Mar 13, 2019. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
/
batch.7.py
119 lines (105 loc) · 3.42 KB
/
batch.7.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
# Update:
# Optimization
# walkers birth/death
import sys

import numpy as np
import numpy.linalg as la
# scipy.misc.comb was deprecated in SciPy 1.0 and removed in 1.3;
# scipy.special.comb is the long-standing canonical location.
from scipy.special import comb

sys.path.append('/usr/local/lib/python2.7/site-packages/')
import progressbar

np.random.seed(19260817)  # fixed seed so experiment runs are reproducible

# Command-line interface: <problem label> <graph size>
PROB_TYPE = sys.argv[1]
n = int(sys.argv[2])

SAMPLE = 10     # number of random graphs sampled per run
WALKER = 10000  # initial number of Monte Carlo walkers
PROB = 0.5      # success-probability threshold a run must reach
dt = 0.1        # integration time step for the evolution

# helper is a project-local module (made importable by the path append above);
# imported after seeding, preserving the original execution order.
from helper import classical_solve, compress, HB, HP
# Verify if given computation time guarantees sufficient success probability
def run_single (T, ans, cnk, H_B, H_P):
    '''Simulate one annealing run of total time T and test its success.

    The interpolated Hamiltonian H(t) = (1 - t/T) * H_B + (t/T) * H_P is
    evolved in imaginary time by a diffusion Monte Carlo random walk with
    walker birth/death, and the final wave function is reconstructed from
    the surviving walkers.

    Parameters:
        T    -- total evolution time; integrated in global steps of dt
        ans  -- index (or indices) of the target basis state(s) in the
                compressed space, as produced by run_random
        cnk  -- dimension of the compressed state space
        H_B  -- (cnk, cnk) beginning Hamiltonian
        H_P  -- (cnk, cnk) problem Hamiltonian

    Returns:
        True iff the final probability mass on `ans` is at least PROB.

    Note: an earlier dense reference implementation (direct Euler
    integration of psi' = -H psi) was removed; see repository history.
    '''
    # Monte Carlo random walk
    # sample walkers from initial wave function amplitude (uniform, so
    # walkers start uniformly distributed over the cnk basis states)
    walker_cnt = WALKER
    walkers = np.random.randint(cnk, size=walker_cnt) # random walkers
    log_weights = np.zeros(walker_cnt) # initial weights are 1
    # random walk
    for t in np.arange(0.0, T, dt):
        # walkers random diffusion under the first-order propagator
        H = (1.0 - t/T) * H_B + t/T * H_P
        G = np.identity(cnk) - dt * H
        for i in range(walker_cnt):
            walker = walkers[i] # current value of walker
            weight = np.sum(G[:,walker]) # step weight
            # NOTE(review): np.random.choice requires dist to be a valid
            # probability vector; this assumes dt is small enough that
            # column entries of G stay non-negative — confirm for given H.
            dist = G[:,walker]/weight # distribution of next values
            walkers[i] = np.random.choice(cnk, p=dist) # random diffusion
            log_weights[i] += np.log(weight) # walker weight multiplied by step weight
        # normalize product of weights to 1 (subtract mean in log space)
        log_weights -= np.average(log_weights)
        # birth: split walkers with large weight into two half-weight
        # copies (total weight conserved: both keep log_weight - log 2)
        idx_large = (log_weights > 1)
        log_weights[idx_large] -= np.log(2)
        walkers = np.append(walkers, walkers[idx_large])
        log_weights = np.append(log_weights, log_weights[idx_large])
        # death: kill walkers with small weight
        idx_not_small = (log_weights >= -1)
        walkers = walkers[idx_not_small]
        log_weights = log_weights[idx_not_small]
        walker_cnt = walkers.size
    # reconstruct wave function from random walkers
    psi = np.zeros(cnk)
    for i in range(walker_cnt):
        psi[walkers[i]] += np.exp(log_weights[i])
    psi /= la.norm(psi) # normalize wave function
    # success probability = total probability mass on the answer state(s)
    prob = np.sum(psi[ans]**2)
    return prob >= PROB
# Run algorithm on a single random graph of size n; return computation time to achieve probability threshold
def run_random (n):
    '''Sample one random graph of size n and search for the minimal
    computation time that makes run_single succeed.

    Parameters:
        n -- number of graph vertices

    Returns:
        (k, T) where k is the classical optimum found by classical_solve
        and T is the smallest tested evolution time (a multiple of dt)
        for which run_single reported success.
    '''
    # Generate random graph G: each edge exists by probability 1/2.
    # XOR with the transpose makes the matrix symmetric with a zero
    # diagonal (a ^ a == 0), i.e. a valid undirected adjacency matrix.
    G = np.random.randint(2, size=(n, n))
    G = G ^ G.T # enforce symmetry and zeros on diagonal
    k, ans = classical_solve(n, G)
    cnk, rank, knar = compress(n, k)
    H_B = HB(n, k, cnk, knar)
    H_P = HP(n, G, cnk, knar)
    ans = rank[ans] # answer index in the compressed space
    # Exponential search for an upper bound on the time (in units of dt),
    # then binary search the threshold between T_min (fail) and T_max (ok).
    T_min = 0
    T_max = 1
    while not run_single(T_max*dt, ans, cnk, H_B, H_P):
        T_max *= 2
    while T_max - T_min > 1:
        T = (T_min + T_max) // 2
        if run_single(T*dt, ans, cnk, H_B, H_P):
            T_max = T
        else:
            T_min = T
    return k, T_max*dt
# Driver: sample SAMPLE random graphs of size n, record the required
# computation time overall and grouped by the classical optimum k, then
# write summary statistics to the per-run output file.
ansall = []                            # computation times over all samples
ans = {k: [] for k in range(0, n+1)}   # computation times grouped by k
pbar = progressbar.ProgressBar(widgets=[
    progressbar.Percentage(), ' ',
    progressbar.Bar(marker='>', fill='-'), ' ',
    progressbar.ETA(), ' ',
])
for it in pbar(range(SAMPLE)):
    k, T = run_random(n)
    ansall.append(T)
    ans[k].append(T)
# Redirect stdout to the result file (kept for compatibility with the
# original output format); hold the handle so it can be closed/flushed.
out = open('output/' + PROB_TYPE + '_' + str(n) + '.txt', 'w')
sys.stdout = out
print('mean: ' + str(np.mean(np.array(ansall))))
print('median: ' + str(np.median(np.array(ansall))))
print(ansall)
print(' ')
# .items() (not Python-2-only .iteritems()) works on both Python 2 and 3;
# sorted() makes the per-k output order deterministic.
for k, v in sorted(ans.items()):
    print(str(k) + ': ' + str(v))
out.close()