Towards Quantum Machine Learning with Tensor Networks
もうちょい見てみます。
Towards Quantum Machine Learning with Tensor Networks
William Huggins,1 Piyush Patil,1 Bradley Mitchell,1 K. Birgitta Whaley,1 and E. Miles Stoudenmire2
1University of California Berkeley, Berkeley, CA 94720 USA
2Center for Computational Quantum Physics, Flatiron Institute, 162 5th Avenue, New York, NY 10010, USA
(Dated: August 1, 2018)
#MERA circuit
def mera(a):
    """Build the 4-qubit MERA-style variational circuit.

    `a` is a flat list of 18 rotation angles consumed in order:
    12 for the first single-qubit layer, 6 for the second.
    Returns a blueqat Circuit (measurement-free).
    """
    c = Circuit()
    # First layer: one u3 rotation (3 angles) on each of the 4 qubits.
    for q in range(4):
        c.u3(a[3 * q], a[3 * q + 1], a[3 * q + 2])[q]
    # Entangle neighbouring pairs (0,1) and (2,3).
    c.cx[0, 1].cx[2, 3]
    # Second rotation layer on the "kept" qubits 1 and 3.
    c.u3(a[12], a[13], a[14])[1]
    c.u3(a[15], a[16], a[17])[3]
    # Top-level entangler feeding the readout.
    c.cx[1, 3]
    return c
from blueqat import Circuit
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import time
# IPython/Jupyter magic: render matplotlib figures inline in the notebook.
# (Not valid plain Python — this cell must run under IPython.)
%matplotlib inline
# Fix the RNG seed so the initial angles and the sample order in the
# training loops below are reproducible across runs.
np.random.seed(39)
#initial parameters
# 18 rotation angles drawn uniformly from [0, 2*pi); shared as the common
# starting point for both the MERA and the MPS experiment.
ainit = [np.random.rand()*np.pi*2 for i in range(18)]
必要な関数を実装します。
Copy
#expectation value
def E(sv):
    """Expectation value <Z> of the readout qubit from a statevector.

    Interprets the first half of the amplitudes as the readout qubit
    being |0> (eigenvalue +1) and the second half as |1> (eigenvalue -1),
    so the result lies in [-1, 1].

    Generalized: the split point is len(sv) // 2 instead of the original
    hard-coded 8, so any even-length statevector works; behaviour is
    unchanged for the 16-amplitude (4-qubit) vectors used in this file.

    sv : 1-D sequence of complex amplitudes (e.g. Circuit.run() output).
    """
    probs = np.abs(np.asarray(sv)) ** 2
    mid = len(probs) // 2
    # P(readout = 0) - P(readout = 1); np.sum keeps the reduction in C.
    return float(np.sum(probs[:mid]) - np.sum(probs[mid:]))
#loss function
def L(p, t):
    """Squared-error loss between prediction p and target t."""
    diff = p - t
    return diff * diff
#data to gate
def ix(l):
    """Encode a classical input as a state-preparation circuit.

    `l` lists the qubit indices to flip to |1>; every other qubit of the
    fresh 4-qubit circuit stays |0>. Returns a blueqat Circuit.
    """
    c = Circuit(4)
    for qubit in l:
        c.x[qubit]
    return c
データを準備します。トレーニング用のデータと検証用を準備しました。
Copy
#training data
# Each inner list names the qubits set to |1> by ix(); the labels are
# consistent with the parity of the number of set bits (two bits -> +1,
# one bit -> -1) — presumably a parity-style toy task; confirm intent.
inp = [[0,1],[2,3],[0],[3]]
tgt = [1,1,-1,-1]
#validation data
# Held-out inputs following the same apparent labelling rule.
inp_c = [[1],[2],[0,2],[1,3]]
tgt_c = [-1,-1,1,1]
早速開始です。毎回の勾配の計算時に訓練データをランダムで選び最適化をかけます。
Copy
#initial parameters
a = ainit.copy()
#result list
# Per-iteration loss values, for the convergence plot below.
ar = []
# h: finite-difference step; e: learning rate.
h = 0.01
e = 0.01
#iterations
nsteps = 800
start = time.time()
for i in range(nsteps):
    # Stochastic step: pick one random training sample per iteration.
    r = np.random.randint(0,len(inp))
    # Full circuit = state preparation followed by the variational ansatz.
    c = ix(inp[r])+mera(a)
    loss = L(E(c.run()),tgt[r])
    ar.append(loss)
    at = [0 for i in range(len(a))]
    for j in range(len(a)):
        # One-sided finite-difference estimate of dL/da[j] on this sample,
        # reusing `loss` as the unperturbed value.
        aa = a.copy()
        aa[j] += h
        loss2 = L(E((ix(inp[r])+mera(aa)).run()),tgt[r])
        # Plain gradient-descent update with learning rate e.
        at[j] = a[j] - e*(loss2 - loss)/h
    a = at
plt.plot(ar)
plt.show()
print(time.time() - start)
<Figure size 432x288 with 1 Axes>
5.946179628372192
うまく収束したのでチェックしてみたいと思います。
Copy
#training accuracy
# Mean of prediction/target over the training set; values near 1.0 mean
# the circuit's <Z> output matches both sign and magnitude of the labels.
np.mean([E((ix(inp[i])+mera(a)).run())/tgt[i] for i in range(len(inp))])
#=>0.9760245973174358
0.9760245973174352
Copy
#validation accuracy
# Same prediction/target ratio, on the held-out inputs.
np.mean([E((ix(inp_c[i])+mera(a)).run())/tgt_c[i] for i in range(len(inp_c))])
#=>0.9769492668551134
#MPS circuit
def mps(a):
    """Build the 4-qubit MPS-style variational circuit.

    `a` is a flat list of 18 rotation angles consumed in order.
    Unlike mera(), entanglement is a left-to-right CNOT ladder with an
    intermediate u3 rotation after each of the first two links.
    Returns a blueqat Circuit (measurement-free).
    """
    c = Circuit()
    # First layer: one u3 rotation (3 angles) on each of the 4 qubits.
    for q in range(4):
        c.u3(a[3 * q], a[3 * q + 1], a[3 * q + 2])[q]
    # CNOT ladder with interleaved rotations on the carried qubit.
    c.cx[0, 1]
    c.u3(a[12], a[13], a[14])[1]
    c.cx[1, 2]
    c.u3(a[15], a[16], a[17])[2]
    c.cx[2, 3]
    return c
早速計算します。その他のパラメータ類は先ほどと同じにします。
Copy
start = time.time()
#initial parameters
# Restart from the same initial angles as the MERA run for a fair comparison.
a = ainit.copy()
#result list
ar = []
# h: finite-difference step; e: learning rate (same as the MERA run).
h = 0.01
e = 0.01
for i in range(nsteps):
    # Stochastic step: pick one random training sample per iteration.
    r = np.random.randint(0,len(inp))
    loss = L(E((ix(inp[r])+mps(a)).run()),tgt[r])
    ar.append(loss)
    at = [0 for i in range(len(a))]
    for j in range(len(a)):
        # One-sided finite-difference estimate of dL/da[j] on this sample.
        aa = a.copy()
        aa[j] += h
        loss2 = L(E((ix(inp[r])+mps(aa)).run()),tgt[r])
        at[j] = a[j] - e*(loss2 - loss)/h
    a = at
plt.plot(ar)
plt.show()
print(time.time() - start)
<Figure size 432x288 with 1 Axes>
5.819611310958862
7.919758081436157
精度です。
Copy
#training accuracy
# Mean of prediction/target over the training set for the MPS ansatz.
np.mean([E((ix(inp[i])+mps(a)).run())/tgt[i] for i in range(len(inp))])
#=>0.9697437828925157
0.9700788727843366
Copy
#validation accuracy
# Same prediction/target ratio, on the held-out inputs, for the MPS ansatz.
np.mean([E((ix(inp_c[i])+mps(a)).run())/tgt_c[i] for i in range(len(inp_c))])
#=>0.9696755262709482