4. Optimization
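Here we variationally minimize the energy of a periodic Heisenberg chain: a random MPS ansatz is optimized against the Hamiltonian MPO, with gradients computed by automatic differentiation.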
import quimb as qu
import quimb.tensor as qtn
L = 64      # number of sites
D = 16      # maximum bond dimension
pbc = True  # periodic boundary conditions

# a random initial MPS and the Heisenberg Hamiltonian as an MPO
psi = qtn.MPS_rand_state(L, bond_dim=D, cyclic=pbc)
ham = qtn.MPO_ham_heis(L, cyclic=pbc)
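The state and the operator share the same site indices, so energies can be computed by sandwiching the MPO between the MPS and its conjugate. Displaying psi shows the structure of the ansatz (the same repr format as the optimized state further below):

psi
# <MatrixProductState(tensors=64, indices=128, L=64, max_bond=16)>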
def norm_fn(psi):
    # compute the norm of the state and rescale it to 1,
    # spreading the rescaling factor across all tensors
    nfact = (psi.H @ psi)**0.5
    return psi.multiply(1 / nfact, spread_over='all')
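Gradient updates do not preserve the norm of the state, so norm_fn is used to map the raw parameters back onto a normalized state before the loss is evaluated. A quick sanity check, using only the contraction syntax above:

# the normalized state should have unit norm
psi_n = norm_fn(psi)
print(psi_n.H @ psi_n)  # ~ 1.0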
def loss_fn(psi, ham):
    # compute the energy <psi|H|psi> by aligning the bra,
    # MPO and ket indices, then contracting everything
    b, h, k = qtn.tensor_network_align(psi.H, ham, psi)
    energy_tn = b | h | k
    return energy_tn ^ ...
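With both functions defined we can already evaluate the energy of the normalized random starting state, giving a baseline for the optimization:

# energy of the random initial state, typically far above the ground state
print(loss_fn(norm_fn(psi), ham))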
tnopt = qtn.TNOptimizer(
    # the tensor network we want to optimize
    psi,
    # the functions specifying the loss and normalization
    loss_fn=loss_fn,
    norm_fn=norm_fn,
    # we specify constants so that the arguments can be converted
    # to the desired autodiff backend automatically
    loss_constants={"ham": ham},
    # the underlying algorithm to use for the optimization:
    # 'l-bfgs-b' is the default and often good for fast initial
    # progress, here we use 'adam' instead
    optimizer="adam",
    # which library to use for automatic differentiation
    autodiff_backend="jax",
)
tnopt
<TNOptimizer(d=32768, backend=jax)>
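The d=32768 reported here is the total number of optimized parameters: 64 tensors, each carrying two bond indices of size 16 and one physical index of size 2:

# parameter count of the cyclic MPS ansatz
64 * 16 * 16 * 2  # == 32768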
tnopt.optimize(1000)
-28.294601440430 [best: -28.294666290283] : : 1001it [00:38, 26.15it/s]
<MatrixProductState(tensors=64, indices=128, L=64, max_bond=16)>
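optimize returns the best network found, as the MatrixProductState repr above shows. A minimal sketch of capturing it to work with directly, assuming (as appears to be the case from the stateful design) that repeated calls continue from the current point; re-applying norm_fn is harmless if the returned state is already normalized:

# the same call as above, but keeping the returned state
psi_opt = tnopt.optimize(1000)
print(loss_fn(norm_fn(psi_opt), ham))  # should match or improve the best loss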
tnopt.plot(hlines={'analytic': qu.heisenberg_energy(L)})
(<Figure size 640x480 with 1 Axes>,
<AxesSubplot:xlabel='Iteration', ylabel='Loss'>)
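The horizontal line marks the analytic ground-state energy from qu.heisenberg_energy. Comparing it against the best loss printed during the run gives a quick accuracy estimate:

e_best = -28.294666290283         # best loss reported above
e_exact = qu.heisenberg_energy(L)
print(abs((e_best - e_exact) / e_exact))  # relative error of the D=16 ansatz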