batch.8.py
# Update:
# custom Hamiltonian representation
# absolute or relative error 0.01 to reduce computation time
# faster classical solve assuming k <= 8 when n <= 32
# kill simulation if T > 163.84
# pull off_index out of Hamiltonian
# avoid np.random.choice
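#
# Overview (inferred from the code below): the script interpolates from a base
# Hamiltonian H_B to a problem Hamiltonian H_P over a time T and checks whether the
# resulting state lands on the classically computed answer with probability >= PROB.
# The evolution is simulated stochastically with a population of weighted random
# walkers, and the smallest sufficient T is located by doubling plus binary search.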
import numpy as np
import os
import sys
sys.path.append('/usr/local/lib/python2.7/site-packages/')  # machine-specific path so progressbar is importable
import progressbar
np.random.seed(int(sys.argv[3]))  # argv[3]: RNG seed, also used as the result directory name
PROB_TYPE = sys.argv[1]           # argv[1]: problem label, used only in the output file name
n = int(sys.argv[2])              # argv[2]: problem size (number of graph vertices)
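# Example invocation (hypothetical values; argv[3] doubles as the result directory name):
#   python batch.8.py test 16 42
# which would write its summary to result/42/test_16.txt.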
SAMPLE = 13      # number of random graph instances per run
WALKER = 10000   # initial number of Monte Carlo walkers
PROB = 0.5       # required success probability threshold
dt = 0.01        # time step of the simulated evolution
error = 0.01     # relative error tolerance for the binary search over T
from Hamiltonian import Hamiltonian
from helper import classical_solve, compress, HB, HP
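# Interfaces assumed from their usage below (local Hamiltonian and helper modules):
#   classical_solve(n, G) -> (k, ans): classical reference solution and its state indices
#   compress(n, k)        -> (cnk, rank, knar): size of the compressed subspace plus the
#                            maps between full states and compressed indices
#   HB(...) / HP(...)     -> Hamiltonian objects supporting +, scalar *, identity(cnk),
#                            col_sum(i) and diag_ratio(i); HB is also expected to fill off_index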
# Check whether the given computation time T achieves the required success probability
def run_single(T, ans, cnk, off_index, H_B, H_P):
    # Monte Carlo random walk:
    # sample walkers uniformly, matching the uniform initial wave-function amplitude
    walker_cnt = WALKER
    walkers = np.random.randint(cnk, size=walker_cnt)  # walker positions in the compressed basis
    log_weights = np.zeros(walker_cnt)                 # initial weights are 1
    # random walk
    for t in np.arange(0.0, T, dt):
        # walkers diffuse randomly under the interpolated Hamiltonian
        H = H_B * (1.0 - t/T) + H_P * (t/T)
        G = H * (-dt) + Hamiltonian.identity(cnk)
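        # Note: G = I - dt*H is a first-order (Euler) step of d(psi)/dt = -H*psi,
        # the same update used by the commented-out dense reference evolution below;
        # the walkers and their log-weights sample this step stochastically.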
        for i in range(walker_cnt):
            walker = walkers[i]          # current position of the walker
            weight = G.col_sum(walker)   # step weight
            if np.random.random() >= G.diag_ratio(walker):
                # hop to a random neighbouring basis state
                walkers[i] = off_index[walker][np.random.randint(off_index[walker].shape[0])]
            log_weights[i] += np.log(weight)  # multiply the walker weight by the step weight
        log_weights -= np.average(log_weights)  # shift so the product of all weights is 1
        # split walkers with large weight
        idx_large = (log_weights > 2)
        log_weights[idx_large] -= np.log(2)
        walkers = np.append(walkers, walkers[idx_large])
        log_weights = np.append(log_weights, log_weights[idx_large])
        # kill walkers with small weight
        idx_not_small = (log_weights >= -2)
        walkers = walkers[idx_not_small]
        log_weights = log_weights[idx_not_small]
        walker_cnt = walkers.size
    # reconstruct the wave function from the random walkers
    psi = np.zeros(cnk)
    for i in range(walker_cnt):
        psi[walkers[i]] += np.exp(log_weights[i])
    psi /= np.linalg.norm(psi)  # normalize the wave function
    '''
    # Dense reference AQC evolution (requires dense matrices H_B0, H_P0); kept for debugging
    psi0 = np.array([cnk**(-0.5) for i in range(cnk)])
    for t in np.arange(0.0, T, dt):
        H = (1.0 - t/T) * H_B0 + t/T * H_P0
        psi0 += (-1) * np.dot(H, psi0) * dt
        psi0 /= np.linalg.norm(psi0)
        print(str(np.sum(psi0[ans]**2)) + ' ' + str(np.sum(psi[ans]**2)))
    '''
    prob = np.sum(psi[ans]**2)
    return prob >= PROB
# Run the algorithm on a single random graph of size n; return (k, T) where T is the
# computation time needed to reach the probability threshold PROB
def run_random(n):
    # Generate a random graph G: each edge exists independently with probability 1/2
    G = np.random.randint(2, size=(n, n))
    G = G ^ G.T  # XOR with the transpose enforces symmetry and a zero diagonal
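    # (For i != j, G[i][j] ^ G[j][i] is the XOR of two independent fair bits, which is
    # again 1 with probability 1/2, so the edge probability stays 1/2 after symmetrizing.)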
    k, ans = classical_solve(n, G)
    cnk, rank, knar = compress(n, k)
    for i in range(len(ans)):
        ans[i] = rank[ans[i]]  # map answer states to compressed indices
    off_index = np.zeros((cnk, k*(n-k)), dtype=int)  # neighbour table, filled in by HB and used as indices
    H_B = HB(n, k, cnk, knar, rank, off_index)
    H_P = HP(n, k, cnk, knar, G)
    # binary-search the computation time (measured in steps of dt)
    T_min = 0
    T_max = 1
    # phase 1: double T_max until the run succeeds, capped at 16384 steps (T = 163.84)
    while not run_single(T_max*dt, ans, cnk, off_index, H_B, H_P):
        T_max *= 2
        if T_max == 16384:
            return k, T_max*dt
    # phase 2: bisect between T_min and T_max down to one step or relative error `error`
    while T_max - T_min > 1 and (T_max - T_min) > T_max * error:
        T = (T_min + T_max) // 2
        if run_single(T*dt, ans, cnk, off_index, H_B, H_P):
            T_max = T
        else:
            T_min = T
    return k, T_max*dt
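# Driver: sample SAMPLE random graphs of size n, collect the required time T for each,
# grouped by the classically computed k, then write summary statistics to a result file.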
ansall = []   # computation times for all samples
ans = {}      # computation times grouped by k
for k in range(0, n+1):
    ans[k] = []
pbar = progressbar.ProgressBar(widgets=[
    progressbar.Percentage(), ' ',
    progressbar.Bar(marker='>', fill='-'), ' ',
    progressbar.ETA(), ' ',
])
for it in pbar(range(SAMPLE)):
    k, T = run_random(n)
    ansall.append(T)
    ans[k].append(T)
if not os.path.exists('result/' + sys.argv[3]):
    os.makedirs('result/' + sys.argv[3])
sys.stdout = open('result/' + sys.argv[3] + '/' + PROB_TYPE + '_' + str(n) + '.txt', 'w')
print('mean: ' + str(np.mean(np.array(ansall))))
print('median: ' + str(np.median(np.array(ansall))))
print(ansall)
print(' ')
for k, v in ans.items():  # .items() works on both Python 2 and 3
    print(str(k) + ': ' + str(v))
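# Output file format (written via the redirected stdout above):
#   mean: <mean of all T>
#   median: <median of all T>
#   <list of all T>
#
#   <k>: <list of T values for the samples for which classical_solve returned that k>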