diff --git a/src/mps/mps.c b/src/mps/mps.c index 6850d1b..3711069 100644 --- a/src/mps/mps.c +++ b/src/mps/mps.c @@ -501,7 +501,7 @@ double mps_orthonormalize_qr(struct mps* mps, const enum mps_orthonormalization_ mps_local_orthonormalize_qr(&mps->a[i], &a_tail); // retrieve normalization factor (real-valued since diagonal of 'r' matrix is real) - double nrm = 0; + double norm = 0; // 'a_tail' can be logically zero in case quantum numbers do not match if (a_tail.blocks[0] != NULL) { @@ -509,22 +509,22 @@ double mps_orthonormalize_qr(struct mps* mps, const enum mps_orthonormalization_ { case CT_SINGLE_REAL: { - nrm = *((float*)a_tail.blocks[0]->data); + norm = *((float*)a_tail.blocks[0]->data); break; } case CT_DOUBLE_REAL: { - nrm = *((double*)a_tail.blocks[0]->data); + norm = *((double*)a_tail.blocks[0]->data); break; } case CT_SINGLE_COMPLEX: { - nrm = crealf(*((scomplex*)a_tail.blocks[0]->data)); + norm = crealf(*((scomplex*)a_tail.blocks[0]->data)); break; } case CT_DOUBLE_COMPLEX: { - nrm = creal(*((dcomplex*)a_tail.blocks[0]->data)); + norm = creal(*((dcomplex*)a_tail.blocks[0]->data)); break; } default: @@ -534,17 +534,17 @@ double mps_orthonormalize_qr(struct mps* mps, const enum mps_orthonormalization_ } } - if (nrm < 0) + if (norm < 0) { // flip sign such that normalization factor is always non-negative rscale_block_sparse_tensor(numeric_neg_one(numeric_real_type(mps->a[i].dtype)), &mps->a[i]); - nrm = -nrm; + norm = -norm; } } delete_block_sparse_tensor(&a_tail); - return nrm; + return norm; } else { @@ -574,7 +574,7 @@ double mps_orthonormalize_qr(struct mps* mps, const enum mps_orthonormalization_ mps_local_orthonormalize_rq(&mps->a[0], &a_head); // retrieve normalization factor (real-valued since diagonal of 'r' matrix is real) - double nrm = 0; + double norm = 0; // 'a_head' can be logically zero in case quantum numbers do not match if (a_head.blocks[0] != NULL) { @@ -582,22 +582,22 @@ double mps_orthonormalize_qr(struct mps* mps, const enum mps_orthonormalization_ { case CT_SINGLE_REAL: { - nrm = *((float*)a_head.blocks[0]->data); + norm = *((float*)a_head.blocks[0]->data); break; } case CT_DOUBLE_REAL: { - nrm = *((double*)a_head.blocks[0]->data); + norm = *((double*)a_head.blocks[0]->data); break; } case CT_SINGLE_COMPLEX: { - nrm = crealf(*((scomplex*)a_head.blocks[0]->data)); + norm = crealf(*((scomplex*)a_head.blocks[0]->data)); break; } case CT_DOUBLE_COMPLEX: { - nrm = creal(*((dcomplex*)a_head.blocks[0]->data)); + norm = creal(*((dcomplex*)a_head.blocks[0]->data)); break; } default: @@ -606,21 +606,325 @@ double mps_orthonormalize_qr(struct mps* mps, const enum mps_orthonormalization_ assert(false); } } - if (nrm < 0) + if (norm < 0) { // flip sign such that normalization factor is always non-negative rscale_block_sparse_tensor(numeric_neg_one(numeric_real_type(mps->a[0].dtype)), &mps->a[0]); - nrm = -nrm; + norm = -norm; } } delete_block_sparse_tensor(&a_head); - return nrm; + return norm; } } +//________________________________________________________________________________________________________________________ +/// +/// \brief Left-orthonormalize a local MPS site tensor by a SVD with truncation, and update tensor at next site. 
+///
+int mps_local_orthonormalize_left_svd(const double tol, const long max_vdim, const bool renormalize, struct block_sparse_tensor* restrict a, struct block_sparse_tensor* restrict a_next, struct trunc_info* info)
+{
+	assert(a->ndim == 3);
+	assert(a_next->ndim == 3);
+
+	// save original logical dimensions and quantum numbers for later splitting
+	const long dim_logical_left[2] = { a->dim_logical[0], a->dim_logical[1] };
+	qnumber* qnums_logical_left[2];
+	for (int i = 0; i < 2; i++)
+	{
+		qnums_logical_left[i] = aligned_alloc(MEM_DATA_ALIGN, dim_logical_left[i] * sizeof(qnumber));
+		memcpy(qnums_logical_left[i], a->qnums_logical[i], dim_logical_left[i] * sizeof(qnumber));
+	}
+	assert(a->axis_dir[0] == TENSOR_AXIS_OUT && a->axis_dir[1] == TENSOR_AXIS_OUT);
+	const enum tensor_axis_direction axis_dir_left[2] = { TENSOR_AXIS_OUT, TENSOR_AXIS_OUT };
+
+	// combine left virtual bond and physical axes
+	struct block_sparse_tensor a_mat;
+	flatten_block_sparse_tensor_axes(a, 0, TENSOR_AXIS_OUT, &a_mat);
+	delete_block_sparse_tensor(a);
+
+	// perform truncated SVD
+	struct block_sparse_tensor m0, m1;
+	int ret = split_block_sparse_matrix_svd(&a_mat, tol, max_vdim, renormalize, SVD_DISTR_RIGHT, &m0, &m1, info);
+	delete_block_sparse_tensor(&a_mat);
+	if (ret < 0) {
+		return ret;
+	}
+
+	// replace 'a' by reshaped 'm0' matrix
+	split_block_sparse_tensor_axis(&m0, 0, dim_logical_left, axis_dir_left, (const qnumber**)qnums_logical_left, a);
+	delete_block_sparse_tensor(&m0);
+	for (int i = 0; i < 2; i++) {
+		aligned_free(qnums_logical_left[i]);
+	}
+
+	// update 'a_next' tensor: multiply with 'm1' from the left
+	struct block_sparse_tensor a_next_update;
+	block_sparse_tensor_dot(&m1, TENSOR_AXIS_RANGE_TRAILING, a_next, TENSOR_AXIS_RANGE_LEADING, 1, &a_next_update);
+	delete_block_sparse_tensor(a_next);
+	move_block_sparse_tensor_data(&a_next_update, a_next);
+	delete_block_sparse_tensor(&m1);
+
+	return 0;
+}
+
+
+//________________________________________________________________________________________________________________________
+///
+/// \brief Right-orthonormalize a local MPS site tensor by an SVD with truncation, and update the tensor at the previous site.
+///
+int mps_local_orthonormalize_right_svd(const double tol, const long max_vdim, const bool renormalize, struct block_sparse_tensor* restrict a, struct block_sparse_tensor* restrict a_prev, struct trunc_info* info)
+{
+	assert(a->ndim == 3);
+	assert(a_prev->ndim == 3);
+
+	// save original logical dimensions and quantum numbers for later splitting
+	const long dim_logical_right[2] = { a->dim_logical[1], a->dim_logical[2] };
+	qnumber* qnums_logical_right[2];
+	for (int i = 0; i < 2; i++)
+	{
+		qnums_logical_right[i] = aligned_alloc(MEM_DATA_ALIGN, dim_logical_right[i] * sizeof(qnumber));
+		memcpy(qnums_logical_right[i], a->qnums_logical[1 + i], dim_logical_right[i] * sizeof(qnumber));
+	}
+	assert(a->axis_dir[1] == TENSOR_AXIS_OUT && a->axis_dir[2] == TENSOR_AXIS_IN);
+	const enum tensor_axis_direction axis_dir_right[2] = { TENSOR_AXIS_OUT, TENSOR_AXIS_IN };
+
+	// combine physical and right virtual bond axes
+	struct block_sparse_tensor a_mat;
+	flatten_block_sparse_tensor_axes(a, 1, TENSOR_AXIS_IN, &a_mat);
+	delete_block_sparse_tensor(a);
+
+	// perform truncated SVD
+	struct block_sparse_tensor m0, m1;
+	int ret = split_block_sparse_matrix_svd(&a_mat, tol, max_vdim, renormalize, SVD_DISTR_LEFT, &m0, &m1, info);
+	delete_block_sparse_tensor(&a_mat);
+	if (ret < 0) {
+		return ret;
+	}
+
+	// replace 'a' by reshaped 'm1' matrix
+	split_block_sparse_tensor_axis(&m1, 1, dim_logical_right, axis_dir_right, (const qnumber**)qnums_logical_right, a);
+	delete_block_sparse_tensor(&m1);
+	for (int i = 0; i < 2; i++) {
+		aligned_free(qnums_logical_right[i]);
+	}
+
+	// update 'a_prev' tensor: multiply with 'm0' from the right
+	struct block_sparse_tensor a_prev_update;
+	block_sparse_tensor_dot(a_prev, TENSOR_AXIS_RANGE_TRAILING, &m0, TENSOR_AXIS_RANGE_LEADING, 1, &a_prev_update);
+	delete_block_sparse_tensor(a_prev);
+	move_block_sparse_tensor_data(&a_prev_update, a_prev);
+	delete_block_sparse_tensor(&m0);
+
+	return 0;
+}
+
+
+//________________________________________________________________________________________________________________________
+///
+/// \brief Compress and orthonormalize an MPS by site-local SVDs and singular value truncations.
+///
+/// Returns the original norm and the scaling factor due to compression; the compressed MPS is normalized, and the input state is approximated by 'norm * scale' times the compressed state.
+///
+int mps_compress(const double tol, const long max_vdim, const enum mps_orthonormalization_mode mode, struct mps* mps, double* restrict norm, double* restrict scale, struct trunc_info* info)
+{
+	const bool renormalize = false;
+
+	if (mode == MPS_ORTHONORMAL_LEFT)
+	{
+		// transform to right-canonical form first
+		(*norm) = mps_orthonormalize_qr(mps, MPS_ORTHONORMAL_RIGHT);
+
+		for (int i = 0; i < mps->nsites - 1; i++)
+		{
+			int ret = mps_local_orthonormalize_left_svd(tol, max_vdim, renormalize, &mps->a[i], &mps->a[i + 1], &info[i]);
+			if (ret < 0) {
+				return ret;
+			}
+		}
+
+		// last tensor
+		const int i = mps->nsites - 1;
+		assert(mps->a[i].dim_logical[2] == 1);
+
+		// create a dummy "tail" tensor
+		const long dim_tail[3] = { mps->a[i].dim_logical[2], 1, mps->a[i].dim_logical[2] };
+		const enum tensor_axis_direction axis_dir_tail[3] = { TENSOR_AXIS_OUT, TENSOR_AXIS_OUT, TENSOR_AXIS_IN };
+		qnumber qsite_tail[1] = { 0 };
+		const qnumber* qnums_tail[3] = { mps->a[i].qnums_logical[2], qsite_tail, mps->a[i].qnums_logical[2] };
+		struct block_sparse_tensor a_tail;
+		allocate_block_sparse_tensor(mps->a[i].dtype, 3, dim_tail, axis_dir_tail, qnums_tail, &a_tail);
+		assert(a_tail.dim_blocks[0] == 1 && a_tail.dim_blocks[1] == 1 && a_tail.dim_blocks[2] == 1);
+		// set single entry to 1
+		assert(a_tail.blocks[0] != NULL);
+		memcpy(a_tail.blocks[0]->data, numeric_one(a_tail.blocks[0]->dtype), sizeof_numeric_type(a_tail.blocks[0]->dtype));
+
+		// orthonormalize last MPS tensor
+		int ret = mps_local_orthonormalize_left_svd(tol, max_vdim, renormalize, &mps->a[i], &a_tail, &info[i]);
+		if (ret < 0) {
+			return ret;
+		}
+		assert(a_tail.dtype == mps->a[i].dtype);
+		assert(a_tail.dim_logical[0] == 1 && a_tail.dim_logical[1] == 1 && a_tail.dim_logical[2] == 1);
+		// quantum numbers for 'a_tail' should match due to preceding QR orthonormalization
+		assert(a_tail.blocks[0] != NULL);
+		switch (a_tail.blocks[0]->dtype)
+		{
+			case CT_SINGLE_REAL:
+			{
+				float d = *((float*)a_tail.blocks[0]->data);
+				// absorb potential phase factor into MPS tensor
+				if (d < 0) {
+					scale_block_sparse_tensor(numeric_neg_one(CT_SINGLE_REAL), &mps->a[i]);
+				}
+				(*scale) = fabsf(d);
+				break;
+			}
+			case CT_DOUBLE_REAL:
+			{
+				double d = *((double*)a_tail.blocks[0]->data);
+				// absorb potential phase factor into MPS tensor
+				if (d < 0) {
+					scale_block_sparse_tensor(numeric_neg_one(CT_DOUBLE_REAL), &mps->a[i]);
+				}
+				(*scale) = fabs(d);
+				break;
+			}
+			case CT_SINGLE_COMPLEX:
+			{
+				scomplex d = *((scomplex*)a_tail.blocks[0]->data);
+				// absorb potential phase factor into MPS tensor
+				float abs_d = cabsf(d);
+				if (abs_d != 0) {
+					scomplex phase = d / abs_d;
+					scale_block_sparse_tensor(&phase, &mps->a[i]);
+				}
+				(*scale) = abs_d;
+				break;
+			}
+			case CT_DOUBLE_COMPLEX:
+			{
+				dcomplex d = *((dcomplex*)a_tail.blocks[0]->data);
+				// absorb potential phase factor into MPS tensor
+				double abs_d = cabs(d);
+				if (abs_d != 0) {
+					dcomplex phase = d / abs_d;
+					scale_block_sparse_tensor(&phase, &mps->a[i]);
+				}
+				(*scale) = abs_d;
+				break;
+			}
+			default:
+			{
+				// unknown data type
+				assert(false);
+			}
+		}
+
+		delete_block_sparse_tensor(&a_tail);
+	}
+	else
+	{
+		assert(mode == MPS_ORTHONORMAL_RIGHT);
+
+		// transform to left-canonical form first
+		(*norm) = mps_orthonormalize_qr(mps, MPS_ORTHONORMAL_LEFT);
+
+		for (int i = mps->nsites - 1; i > 0; i--)
+		{
+			int ret = mps_local_orthonormalize_right_svd(tol, max_vdim, renormalize, &mps->a[i], &mps->a[i - 1], &info[i]);
+			if (ret < 0) {
+				return ret;
+			}
+		}
+
+		// first tensor
+		assert(mps->a[0].dim_logical[0] == 1);
+
+		// create a dummy "head" tensor
+		const long dim_head[3] = { mps->a[0].dim_logical[0], 1, mps->a[0].dim_logical[0] };
+		const enum tensor_axis_direction axis_dir_head[3] = { TENSOR_AXIS_OUT, TENSOR_AXIS_OUT, TENSOR_AXIS_IN };
+		qnumber qsite_head[1] = { 0 };
+		const qnumber* qnums_head[3] = { mps->a[0].qnums_logical[0], qsite_head, mps->a[0].qnums_logical[0] };
+		struct block_sparse_tensor a_head;
+		allocate_block_sparse_tensor(mps->a[0].dtype, 3, dim_head, axis_dir_head, qnums_head, &a_head);
+		assert(a_head.dim_blocks[0] == 1 && a_head.dim_blocks[1] == 1 && a_head.dim_blocks[2] == 1);
+		// set single entry to 1
+		assert(a_head.blocks[0] != NULL);
+		memcpy(a_head.blocks[0]->data, numeric_one(a_head.blocks[0]->dtype), sizeof_numeric_type(a_head.blocks[0]->dtype));
+
+		// orthonormalize first MPS tensor
+		int ret = mps_local_orthonormalize_right_svd(tol, max_vdim, renormalize, &mps->a[0], &a_head, &info[0]);
+		if (ret < 0) {
+			return ret;
+		}
+		assert(a_head.dtype == mps->a[0].dtype);
+		assert(a_head.dim_logical[0] == 1 && a_head.dim_logical[1] == 1 && a_head.dim_logical[2] == 1);
+		// quantum numbers for 'a_head' should match due to preceding QR orthonormalization
+		assert(a_head.blocks[0] != NULL);
+		switch (a_head.blocks[0]->dtype)
+		{
+			case CT_SINGLE_REAL:
+			{
+				float d = *((float*)a_head.blocks[0]->data);
+				// absorb potential phase factor into MPS tensor
+				if (d < 0) {
+					scale_block_sparse_tensor(numeric_neg_one(CT_SINGLE_REAL), &mps->a[0]);
+				}
+				(*scale) = fabsf(d);
+				break;
+			}
+			case CT_DOUBLE_REAL:
+			{
+				double d = *((double*)a_head.blocks[0]->data);
+				// absorb potential phase factor into MPS tensor
+				if (d < 0) {
+					scale_block_sparse_tensor(numeric_neg_one(CT_DOUBLE_REAL), &mps->a[0]);
+				}
+				(*scale) = fabs(d);
+				break;
+			}
+			case CT_SINGLE_COMPLEX:
+			{
+				scomplex d = *((scomplex*)a_head.blocks[0]->data);
+				// absorb potential phase factor into MPS tensor
+				float abs_d = cabsf(d);
+				if (abs_d != 0) {
+					scomplex phase = d / abs_d;
+					scale_block_sparse_tensor(&phase, &mps->a[0]);
+				}
+				(*scale) = abs_d;
+				break;
+			}
+			case CT_DOUBLE_COMPLEX:
+			{
+				dcomplex d = *((dcomplex*)a_head.blocks[0]->data);
+				// absorb potential phase factor into MPS tensor
+				double abs_d = cabs(d);
+				if (abs_d != 0) {
+					dcomplex phase = d / abs_d;
+					scale_block_sparse_tensor(&phase, &mps->a[0]);
+				}
+				(*scale) = abs_d;
+				break;
+			}
+			default:
+			{
+				// unknown data type
+				assert(false);
+			}
+		}
+
+		delete_block_sparse_tensor(&a_head);
+	}
+
+	return 0;
+}
+
+
 //________________________________________________________________________________________________________________________
 ///
 /// \brief Split a MPS tensor with dimension `D0 x d0*d1 x D2` into two MPS tensors
diff --git a/src/mps/mps.h b/src/mps/mps.h
index bd7f09c..ba5ef8d 100644
--- a/src/mps/mps.h
+++ b/src/mps/mps.h
@@ -61,11 +61,6 @@ double mps_norm(const struct mps* psi);
 
 // orthonormalization and canonical forms
 
-void mps_local_orthonormalize_qr(struct block_sparse_tensor* restrict a, struct block_sparse_tensor* restrict a_next);
-
-void mps_local_orthonormalize_rq(struct block_sparse_tensor* restrict a, struct block_sparse_tensor* restrict a_prev);
-
-
 /// \brief MPS orthonormalization mode.
 enum mps_orthonormalization_mode
 {
@@ -73,9 +68,23 @@ enum mps_orthonormalization_mode
 	MPS_ORTHONORMAL_RIGHT = 1,  //!< right-orthonormal
 };
 
+void mps_local_orthonormalize_qr(struct block_sparse_tensor* restrict a, struct block_sparse_tensor* restrict a_next);
+
+void mps_local_orthonormalize_rq(struct block_sparse_tensor* restrict a, struct block_sparse_tensor* restrict a_prev);
+
 double mps_orthonormalize_qr(struct mps* mps, const enum mps_orthonormalization_mode mode);
 
+int mps_local_orthonormalize_left_svd(const double tol, const long max_vdim, const bool renormalize,
+	struct block_sparse_tensor* restrict a, struct block_sparse_tensor* restrict a_next, struct trunc_info* info);
+
+int mps_local_orthonormalize_right_svd(const double tol, const long max_vdim, const bool renormalize,
+	struct block_sparse_tensor* restrict a, struct block_sparse_tensor* restrict a_prev, struct trunc_info* info);
+
+int mps_compress(const double tol, const long max_vdim, const enum mps_orthonormalization_mode mode,
+	struct mps* mps, double* restrict norm, double* restrict scale, struct trunc_info* info);
+
+
 //________________________________________________________________________________________________________________________
 //
 
diff --git a/src/tensor/block_sparse_tensor.c b/src/tensor/block_sparse_tensor.c
index 6b5f211..2a14d75 100644
--- a/src/tensor/block_sparse_tensor.c
+++ b/src/tensor/block_sparse_tensor.c
@@ -2312,6 +2312,42 @@ bool block_sparse_tensor_is_identity(const struct block_sparse_tensor* t, const
 }
 
+
+//________________________________________________________________________________________________________________________
+///
+/// \brief Test whether a block-sparse tensor is an isometry within tolerance 'tol'.
+///
+bool block_sparse_tensor_is_isometry(const struct block_sparse_tensor* t, const double tol, const bool transpose)
+{
+	// must be a matrix
+	if (t->ndim != 2) {
+		return false;
+	}
+
+	// entry-wise conjugation
+	struct block_sparse_tensor tc;
+	copy_block_sparse_tensor(t, &tc);
+	conjugate_block_sparse_tensor(&tc);
+	// revert tensor axes directions for multiplication
+	tc.axis_dir[0] = -tc.axis_dir[0];
+	tc.axis_dir[1] = -tc.axis_dir[1];
+
+	struct block_sparse_tensor t2;
+	if (!transpose) {
+		block_sparse_tensor_dot(&tc, TENSOR_AXIS_RANGE_LEADING, t, TENSOR_AXIS_RANGE_LEADING, 1, &t2);
+	}
+	else {
+		block_sparse_tensor_dot(t, TENSOR_AXIS_RANGE_TRAILING, &tc, TENSOR_AXIS_RANGE_TRAILING, 1, &t2);
+	}
+
+	const bool is_isometry = block_sparse_tensor_is_identity(&t2, tol);
+
+	delete_block_sparse_tensor(&t2);
+	delete_block_sparse_tensor(&tc);
+
+	return is_isometry;
+}
+
 
 //________________________________________________________________________________________________________________________
 ///
 /// \brief Overall number of entries in the blocks of the tensor.
diff --git a/src/tensor/block_sparse_tensor.h b/src/tensor/block_sparse_tensor.h
index 664f369..31c9a2d 100644
--- a/src/tensor/block_sparse_tensor.h
+++ b/src/tensor/block_sparse_tensor.h
@@ -156,6 +156,8 @@ bool block_sparse_tensor_allclose(const struct block_sparse_tensor* restrict s,
 
 bool block_sparse_tensor_is_identity(const struct block_sparse_tensor* t, const double tol);
 
+bool block_sparse_tensor_is_isometry(const struct block_sparse_tensor* t, const double tol, const bool transpose);
+
 
 //________________________________________________________________________________________________________________________
 //
 
diff --git a/src/tensor/dense_tensor.c b/src/tensor/dense_tensor.c
index 1269f34..bc74686 100644
--- a/src/tensor/dense_tensor.c
+++ b/src/tensor/dense_tensor.c
@@ -2745,3 +2745,52 @@ bool dense_tensor_is_identity(const struct dense_tensor* t, const double tol)
 
 	return true;
 }
+
+//________________________________________________________________________________________________________________________
+///
+/// \brief Test whether a dense tensor is an isometry within tolerance 'tol'.
+///
+bool dense_tensor_is_isometry(const struct dense_tensor* t, const double tol, const bool transpose)
+{
+	// must be a matrix
+	if (t->ndim != 2) {
+		return false;
+	}
+
+	bool is_isometry;
+	if (t->dtype == CT_SINGLE_REAL || t->dtype == CT_DOUBLE_REAL)
+	{
+		struct dense_tensor t2;
+		if (!transpose) {
+			dense_tensor_dot(t, TENSOR_AXIS_RANGE_LEADING, t, TENSOR_AXIS_RANGE_LEADING, 1, &t2);
+		}
+		else {
+			dense_tensor_dot(t, TENSOR_AXIS_RANGE_TRAILING, t, TENSOR_AXIS_RANGE_TRAILING, 1, &t2);
+		}
+
+		is_isometry = dense_tensor_is_identity(&t2, tol);
+
+		delete_dense_tensor(&t2);
+	}
+	else
+	{
+		struct dense_tensor tc;
+		copy_dense_tensor(t, &tc);
+		conjugate_dense_tensor(&tc);
+
+		struct dense_tensor t2;
+		if (!transpose) {
+			dense_tensor_dot(&tc, TENSOR_AXIS_RANGE_LEADING, t, TENSOR_AXIS_RANGE_LEADING, 1, &t2);
+		}
+		else {
+			dense_tensor_dot(t, TENSOR_AXIS_RANGE_TRAILING, &tc, TENSOR_AXIS_RANGE_TRAILING, 1, &t2);
+		}
+
+		is_isometry = dense_tensor_is_identity(&t2, tol);
+
+		delete_dense_tensor(&t2);
+		delete_dense_tensor(&tc);
+	}
+
+	return is_isometry;
+}
diff --git a/src/tensor/dense_tensor.h b/src/tensor/dense_tensor.h
index 9fa8471..697f6f3 100644
--- a/src/tensor/dense_tensor.h
+++ b/src/tensor/dense_tensor.h
@@ -216,3 +216,5 @@ void dense_tensor_block(const struct dense_tensor* restrict t, const long* restr
 bool dense_tensor_allclose(const struct dense_tensor* restrict s, const struct dense_tensor* restrict t, const double tol);
 
 bool dense_tensor_is_identity(const struct dense_tensor* t, const double tol);
+
+bool dense_tensor_is_isometry(const struct dense_tensor* t, const double tol, const bool transpose);
diff --git a/test/algorithm/test_bond_ops.c b/test/algorithm/test_bond_ops.c
index 4325e36..14a5611 100644
--- a/test/algorithm/test_bond_ops.c
+++ b/test/algorithm/test_bond_ops.c
@@ -165,36 +165,16 @@ char* test_split_block_sparse_matrix_svd()
 		if (d == 0)
 		{
 			// a1 must be an isometry
-			struct block_sparse_tensor a1c;
-			copy_block_sparse_tensor(&a1, &a1c);
-			conjugate_block_sparse_tensor(&a1c);
-			// revert tensor axes directions for multiplication
-			a1c.axis_dir[0] = -a1c.axis_dir[0];
-			a1c.axis_dir[1] = -a1c.axis_dir[1];
-			struct block_sparse_tensor p;
-			block_sparse_tensor_dot(&a1, TENSOR_AXIS_RANGE_TRAILING, &a1c, TENSOR_AXIS_RANGE_TRAILING, 1, &p);
-			if (!block_sparse_tensor_is_identity(&p, 5e-6)) {
+			if (!block_sparse_tensor_is_isometry(&a1, 5e-6, true)) {
 				return "'a1' matrix is not an isometry";
 			}
-			delete_block_sparse_tensor(&p);
-			delete_block_sparse_tensor(&a1c);
 		}
 		else
 		{
 			// a0 must be an isometry
-			struct block_sparse_tensor a0c;
-			copy_block_sparse_tensor(&a0, &a0c);
-			conjugate_block_sparse_tensor(&a0c);
-			// revert tensor axes directions for multiplication
-			a0c.axis_dir[0] = -a0c.axis_dir[0];
-			a0c.axis_dir[1] = -a0c.axis_dir[1];
-			struct block_sparse_tensor p;
-			block_sparse_tensor_dot(&a0c, TENSOR_AXIS_RANGE_LEADING, &a0, TENSOR_AXIS_RANGE_LEADING, 1, &p);
-			if (!block_sparse_tensor_is_identity(&p, 5e-6)) {
-				return "'a1' matrix is not an isometry";
+			if (!block_sparse_tensor_is_isometry(&a0, 5e-6, false)) {
+				return "'a0' matrix is not an isometry";
 			}
-			delete_block_sparse_tensor(&p);
-			delete_block_sparse_tensor(&a0c);
 		}
 
 		// reassemble matrix after splitting, for comparison with reference
@@ -277,19 +257,9 @@ char* test_split_block_sparse_matrix_svd_zero()
 		if (d == 1)
 		{
 			// a0 must be an isometry
-			struct block_sparse_tensor a0c;
-			copy_block_sparse_tensor(&a0, &a0c);
-			conjugate_block_sparse_tensor(&a0c);
-			// revert tensor axes directions for multiplication
-			a0c.axis_dir[0] = -a0c.axis_dir[0];
-			a0c.axis_dir[1] = -a0c.axis_dir[1];
-			struct block_sparse_tensor p;
-			block_sparse_tensor_dot(&a0c, TENSOR_AXIS_RANGE_LEADING, &a0, TENSOR_AXIS_RANGE_LEADING, 1, &p);
-			if (!block_sparse_tensor_is_identity(&p, 5e-6)) {
-				return "'a1' matrix is not an isometry";
+			if (!block_sparse_tensor_is_isometry(&a0, 5e-6, false)) {
+				return "'a0' matrix is not an isometry";
 			}
-			delete_block_sparse_tensor(&p);
-			delete_block_sparse_tensor(&a0c);
 		}
 
 		// reassemble matrix after splitting, to ensure dimension compatibility
diff --git a/test/mps/data/test_mps_compress.hdf5 b/test/mps/data/test_mps_compress.hdf5
new file mode 100644
index 0000000..20abbec
Binary files /dev/null and b/test/mps/data/test_mps_compress.hdf5 differ
diff --git a/test/mps/test_mps.c b/test/mps/test_mps.c
index 846a98a..0c79430 100644
--- a/test/mps/test_mps.c
+++ b/test/mps/test_mps.c
@@ -194,7 +194,7 @@ char* test_mps_orthonormalize_qr()
 		struct block_sparse_tensor vec_ref;
 		mps_to_statevector(&mps, &vec_ref);
 
-		double nrm = mps_orthonormalize_qr(&mps, m == 0 ? MPS_ORTHONORMAL_LEFT : MPS_ORTHONORMAL_RIGHT);
+		double norm = mps_orthonormalize_qr(&mps, m == 0 ? 
MPS_ORTHONORMAL_LEFT : MPS_ORTHONORMAL_RIGHT); if (!mps_is_consistent(&mps)) { return "internal MPS consistency check failed"; @@ -210,12 +210,39 @@ char* test_mps_orthonormalize_qr() } // scaled vector representation must agree with original vector - float nrmf = (float)nrm; - rscale_block_sparse_tensor(&nrmf, &vec); - if (!block_sparse_tensor_allclose(&vec, &vec_ref, nrm * 1e-6)) { + float normf = (float)norm; + rscale_block_sparse_tensor(&normf, &vec); + if (!block_sparse_tensor_allclose(&vec, &vec_ref, norm * 1e-6)) { return "vector representation of MPS after orthonormalization does not match reference"; } + if (m == 0) + { + for (int i = 0; i < nsites; i++) + { + // mps.a[i] must be an isometry + struct block_sparse_tensor a_mat; + flatten_block_sparse_tensor_axes(&mps.a[i], 0, TENSOR_AXIS_OUT, &a_mat); + if (!block_sparse_tensor_is_isometry(&a_mat, 5e-6, false)) { + return "MPS tensor is not isometric"; + } + delete_block_sparse_tensor(&a_mat); + } + } + else + { + for (int i = 0; i < nsites; i++) + { + // mps.a[i] must be an isometry + struct block_sparse_tensor a_mat; + flatten_block_sparse_tensor_axes(&mps.a[i], 1, TENSOR_AXIS_IN, &a_mat); + if (!block_sparse_tensor_is_isometry(&a_mat, 5e-6, true)) { + return "MPS tensor is not isometric"; + } + delete_block_sparse_tensor(&a_mat); + } + } + delete_block_sparse_tensor(&vec_ref); delete_block_sparse_tensor(&vec); delete_mps(&mps); @@ -234,6 +261,157 @@ char* test_mps_orthonormalize_qr() } +char* test_mps_compress() +{ + hid_t file = H5Fopen("../test/mps/data/test_mps_compress.hdf5", H5F_ACC_RDONLY, H5P_DEFAULT); + if (file < 0) { + return "'H5Fopen' in test_mps_compress failed"; + } + + // number of lattice sites + const int nsites = 6; + // local physical dimension + const long d = 3; + + // physical quantum numbers + qnumber* qsite = aligned_alloc(MEM_DATA_ALIGN, d * sizeof(qnumber)); + if (read_hdf5_attribute(file, "qsite", H5T_NATIVE_INT, qsite) < 0) { + return "reading physical quantum numbers from disk failed"; + } + + // virtual bond quantum numbers + const long dim_bonds[7] = { 1, 23, 75, 102, 83, 30, 1 }; + qnumber** qbonds = aligned_alloc(MEM_DATA_ALIGN, (nsites + 1) * sizeof(qnumber*)); + for (int i = 0; i < nsites + 1; i++) + { + qbonds[i] = aligned_alloc(MEM_DATA_ALIGN, dim_bonds[i] * sizeof(qnumber)); + char varname[1024]; + sprintf(varname, "qbond%i", i); + if (read_hdf5_attribute(file, varname, H5T_NATIVE_INT, qbonds[i]) < 0) { + return "reading virtual bond quantum numbers from disk failed"; + } + } + + const long max_vdim = 1024; + + for (int j = 0; j < 2; j++) // determines compression tolerance + { + for (int m = 0; m < 2; m++) + { + struct mps mps; + allocate_mps(CT_SINGLE_COMPLEX, nsites, d, qsite, dim_bonds, (const qnumber**)qbonds, &mps); + + // read MPS tensors from disk + for (int i = 0; i < nsites; i++) + { + // read dense tensors from disk + struct dense_tensor a_dns; + allocate_dense_tensor(mps.a[i].dtype, mps.a[i].ndim, mps.a[i].dim_logical, &a_dns); + char varname[1024]; + sprintf(varname, "a%i", i); + if (read_hdf5_dataset(file, varname, H5T_NATIVE_FLOAT, a_dns.data) < 0) { + return "reading tensor entries from disk failed"; + } + + dense_to_block_sparse_tensor_entries(&a_dns, &mps.a[i]); + + delete_dense_tensor(&a_dns); + } + + if (!mps_is_consistent(&mps)) { + return "internal MPS consistency check failed"; + } + + for (int i = 0; i < nsites + 1; i++) { + if (mps_bond_dim(&mps, i) != dim_bonds[i]) { + return "MPS virtual bond dimension does not match reference"; + } + } + + // convert original MPS 
to state vector + struct block_sparse_tensor vec_ref; + mps_to_statevector(&mps, &vec_ref); + + // perform compression + const double tol_compress = (j == 0 ? 0. : 1e-4); + double norm; + double scale; + struct trunc_info* info = aligned_calloc(MEM_DATA_ALIGN, nsites, sizeof(struct trunc_info)); + if (mps_compress(tol_compress, max_vdim, m == 0 ? MPS_ORTHONORMAL_LEFT : MPS_ORTHONORMAL_RIGHT, &mps, &norm, &scale, info) < 0) { + return "'mps_compress' failed internally"; + } + + if (!mps_is_consistent(&mps)) { + return "internal MPS consistency check failed"; + } + + if (fabs(scale - 1.) > (tol_compress == 0. ? 1e-6 : 1e-4)) { + return "scaling factor due to truncated SVD in MPS compression not close to 1"; + } + + // must be normalized after compression + if (fabs(mps_norm(&mps) - 1.) > 1e-6) { + return "MPS is not normalized after compression"; + } + + if (m == 0) + { + for (int i = 0; i < nsites; i++) + { + // mps.a[i] must be an isometry + struct block_sparse_tensor a_mat; + flatten_block_sparse_tensor_axes(&mps.a[i], 0, TENSOR_AXIS_OUT, &a_mat); + if (!block_sparse_tensor_is_isometry(&a_mat, 5e-6, false)) { + return "MPS tensor is not isometric"; + } + delete_block_sparse_tensor(&a_mat); + } + } + else + { + for (int i = 0; i < nsites; i++) + { + // mps.a[i] must be an isometry + struct block_sparse_tensor a_mat; + flatten_block_sparse_tensor_axes(&mps.a[i], 1, TENSOR_AXIS_IN, &a_mat); + if (!block_sparse_tensor_is_isometry(&a_mat, 5e-6, true)) { + return "MPS tensor is not isometric"; + } + delete_block_sparse_tensor(&a_mat); + } + } + + // convert compressed MPS to state vector + struct block_sparse_tensor vec; + mps_to_statevector(&mps, &vec); + + // compare with original state vector + float normf = (float)norm; + rscale_block_sparse_tensor(&normf, &vec); + if (!block_sparse_tensor_allclose(&vec, &vec_ref, tol_compress == 0. ? 
1e-6 : 0.05)) { + return "vector representation of MPS after compression is not close to original state vector"; + } + + aligned_free(info); + delete_block_sparse_tensor(&vec_ref); + delete_block_sparse_tensor(&vec); + delete_mps(&mps); + } + } + + for (int i = 0; i < nsites + 1; i++) + { + aligned_free(qbonds[i]); + } + aligned_free(qbonds); + aligned_free(qsite); + + H5Fclose(file); + + return 0; +} + + char* test_mps_split_tensor_svd() { hid_t file = H5Fopen("../test/mps/data/test_mps_split_tensor_svd.hdf5", H5F_ACC_RDONLY, H5P_DEFAULT); diff --git a/test/mps/test_mps.py b/test/mps/test_mps.py index 0112dde..1d32d84 100644 --- a/test/mps/test_mps.py +++ b/test/mps/test_mps.py @@ -78,6 +78,38 @@ def mps_orthonormalize_qr_data(): file[f"a{i}"] = interleave_complex(ai.transpose((1, 0, 2))) +def mps_compress_data(): + + # random number generator + rng = np.random.default_rng(934) + + # physical quantum numbers + qd = [-1, 1, 0] + + # virtual bond quantum numbers + qD = [rng.integers(-1, 2, size=Di) for Di in [1, 23, 75, 102, 83, 30, 1]] + + # create random matrix product state with small entanglement + mps = ptn.MPS(qd, qD, fill="random", rng=rng) + for i in range(mps.nsites): + # imitate small entanglement by multiplying bonds with small scaling factors + s = np.exp(-30*(rng.uniform(size=mps.bond_dims[i + 1]))) + s /= np.linalg.norm(s) + mps.A[i] = mps.A[i] * s + # rescale to achieve norm of order 1 + mps.A[i] *= 5 / np.linalg.norm(mps.A[i]) + # convert tensor entries to single precision + mps.A[i] = mps.A[i].astype(np.complex64) + + with h5py.File("data/test_mps_compress.hdf5", "w") as file: + file.attrs["qsite"] = qd + for i, qbond in enumerate(qD): + file.attrs[f"qbond{i}"] = qbond + for i, ai in enumerate(mps.A): + # transposition due to different convention for axis ordering + file[f"a{i}"] = interleave_complex(ai.transpose((1, 0, 2))) + + def mps_split_tensor_svd_data(): # random number generator @@ -156,6 +188,7 @@ def mps_to_statevector_data(): def main(): mps_vdot_data() mps_orthonormalize_qr_data() + mps_compress_data() mps_split_tensor_svd_data() mps_to_statevector_data() diff --git a/test/run_tests.c b/test/run_tests.c index 2dde656..59ded02 100644 --- a/test/run_tests.c +++ b/test/run_tests.c @@ -47,6 +47,7 @@ char* test_su2_tensor_fmove(); char* test_su2_to_dense_tensor(); char* test_mps_vdot(); char* test_mps_orthonormalize_qr(); +char* test_mps_compress(); char* test_mps_split_tensor_svd(); char* test_mps_to_statevector(); char* test_queue(); @@ -122,6 +123,7 @@ int main() TEST_FUNCTION_ENTRY(test_su2_to_dense_tensor), TEST_FUNCTION_ENTRY(test_mps_vdot), TEST_FUNCTION_ENTRY(test_mps_orthonormalize_qr), + TEST_FUNCTION_ENTRY(test_mps_compress), TEST_FUNCTION_ENTRY(test_mps_split_tensor_svd), TEST_FUNCTION_ENTRY(test_mps_to_statevector), TEST_FUNCTION_ENTRY(test_queue), diff --git a/test/tensor/test_block_sparse_tensor.c b/test/tensor/test_block_sparse_tensor.c index 1816c3f..d809494 100644 --- a/test/tensor/test_block_sparse_tensor.c +++ b/test/tensor/test_block_sparse_tensor.c @@ -947,19 +947,9 @@ char* test_block_sparse_tensor_qr() delete_block_sparse_tensor(&qr); // 'q' must be an isometry - struct block_sparse_tensor qc; - copy_block_sparse_tensor(&q, &qc); - conjugate_block_sparse_tensor(&qc); - // revert tensor axes directions for multiplication - qc.axis_dir[0] = -qc.axis_dir[0]; - qc.axis_dir[1] = -qc.axis_dir[1]; - struct block_sparse_tensor qhq; - block_sparse_tensor_dot(&qc, TENSOR_AXIS_RANGE_LEADING, &q, TENSOR_AXIS_RANGE_LEADING, 1, &qhq); - if 
(!block_sparse_tensor_is_identity(&qhq, 1e-13)) { + if (!block_sparse_tensor_is_isometry(&q, 1e-13, false)) { return "Q matrix is not an isometry"; } - delete_block_sparse_tensor(&qhq); - delete_block_sparse_tensor(&qc); // 'r' only upper triangular after sorting second axis by quantum numbers @@ -1033,19 +1023,9 @@ char* test_block_sparse_tensor_rq() delete_block_sparse_tensor(&rq); // 'q' must be an isometry - struct block_sparse_tensor qc; - copy_block_sparse_tensor(&q, &qc); - conjugate_block_sparse_tensor(&qc); - // revert tensor axes directions for multiplication - qc.axis_dir[0] = -qc.axis_dir[0]; - qc.axis_dir[1] = -qc.axis_dir[1]; - struct block_sparse_tensor qqh; - block_sparse_tensor_dot(&q, TENSOR_AXIS_RANGE_TRAILING, &qc, TENSOR_AXIS_RANGE_TRAILING, 1, &qqh); - if (!block_sparse_tensor_is_identity(&qqh, 1e-13)) { + if (!block_sparse_tensor_is_isometry(&q, 1e-13, true)) { return "Q matrix is not an isometry"; } - delete_block_sparse_tensor(&qqh); - delete_block_sparse_tensor(&qc); // 'r' only upper triangular after sorting first axis by quantum numbers @@ -1127,36 +1107,16 @@ char* test_block_sparse_tensor_svd() delete_block_sparse_tensor(&usvh); // 'u' must be an isometry - struct block_sparse_tensor uc; - copy_block_sparse_tensor(&u, &uc); - conjugate_block_sparse_tensor(&uc); - // revert tensor axes directions for multiplication - uc.axis_dir[0] = -uc.axis_dir[0]; - uc.axis_dir[1] = -uc.axis_dir[1]; - struct block_sparse_tensor uhu; - block_sparse_tensor_dot(&uc, TENSOR_AXIS_RANGE_LEADING, &u, TENSOR_AXIS_RANGE_LEADING, 1, &uhu); - if (!block_sparse_tensor_is_identity(&uhu, 1e-13)) { + if (!block_sparse_tensor_is_isometry(&u, 1e-13, false)) { return "U matrix is not an isometry"; } - delete_block_sparse_tensor(&uhu); - delete_block_sparse_tensor(&uc); if (c == 0) { // 'vh' must be an isometry - struct block_sparse_tensor vt; - copy_block_sparse_tensor(&vh, &vt); - conjugate_block_sparse_tensor(&vt); - // revert tensor axes directions for multiplication - vt.axis_dir[0] = -vt.axis_dir[0]; - vt.axis_dir[1] = -vt.axis_dir[1]; - struct block_sparse_tensor vhv; - block_sparse_tensor_dot(&vh, TENSOR_AXIS_RANGE_TRAILING, &vt, TENSOR_AXIS_RANGE_TRAILING, 1, &vhv); - if (!block_sparse_tensor_is_identity(&vhv, 1e-13)) { + if (!block_sparse_tensor_is_isometry(&vh, 1e-13, true)) { return "V matrix is not an isometry"; } - delete_block_sparse_tensor(&vhv); - delete_block_sparse_tensor(&vt); } else { diff --git a/test/tensor/test_dense_tensor.c b/test/tensor/test_dense_tensor.c index ec7f165..331a615 100644 --- a/test/tensor/test_dense_tensor.c +++ b/test/tensor/test_dense_tensor.c @@ -656,16 +656,9 @@ char* test_dense_tensor_qr() delete_dense_tensor(&qr); // 'q' must be an isometry - struct dense_tensor qc; - copy_dense_tensor(&q, &qc); - conjugate_dense_tensor(&qc); - struct dense_tensor qhq; - dense_tensor_dot(&qc, TENSOR_AXIS_RANGE_LEADING, &q, TENSOR_AXIS_RANGE_LEADING, 1, &qhq); - if (!dense_tensor_is_identity(&qhq, tol)) { + if (!dense_tensor_is_isometry(&q, tol, false)) { return "Q matrix is not an isometry"; } - delete_dense_tensor(&qhq); - delete_dense_tensor(&qc); // 'r' must be upper triangular const long k = dim[0] <= dim[1] ? 
dim[0] : dim[1]; @@ -730,16 +723,9 @@ char* test_dense_tensor_rq() delete_dense_tensor(&rq); // 'q' must be an isometry - struct dense_tensor qc; - copy_dense_tensor(&q, &qc); - conjugate_dense_tensor(&qc); - struct dense_tensor qqh; - dense_tensor_dot(&q, TENSOR_AXIS_RANGE_TRAILING, &qc, TENSOR_AXIS_RANGE_TRAILING, 1, &qqh); - if (!dense_tensor_is_identity(&qqh, tol)) { + if (!dense_tensor_is_isometry(&q, tol, true)) { return "Q matrix is not an isometry"; } - delete_dense_tensor(&qqh); - delete_dense_tensor(&qc); // 'r' must be upper triangular (referenced from the bottom right entry) const long k = dim[0] <= dim[1] ? dim[0] : dim[1]; @@ -807,28 +793,14 @@ char* test_dense_tensor_svd() delete_dense_tensor(&usvh); // 'u' must be an isometry - struct dense_tensor uc; - copy_dense_tensor(&u, &uc); - conjugate_dense_tensor(&uc); - struct dense_tensor uhu; - dense_tensor_dot(&uc, TENSOR_AXIS_RANGE_LEADING, &u, TENSOR_AXIS_RANGE_LEADING, 1, &uhu); - if (!dense_tensor_is_identity(&uhu, tol)) { + if (!dense_tensor_is_isometry(&u, tol, false)) { return "U matrix is not an isometry"; } - delete_dense_tensor(&uhu); - delete_dense_tensor(&uc); // 'v' must be an isometry - struct dense_tensor vt; - copy_dense_tensor(&vh, &vt); - conjugate_dense_tensor(&vt); - struct dense_tensor vhv; - dense_tensor_dot(&vh, TENSOR_AXIS_RANGE_TRAILING, &vt, TENSOR_AXIS_RANGE_TRAILING, 1, &vhv); - if (!dense_tensor_is_identity(&vhv, tol)) { + if (!dense_tensor_is_isometry(&vh, tol, true)) { return "V matrix is not an isometry"; } - delete_dense_tensor(&vhv); - delete_dense_tensor(&vt); delete_dense_tensor(&vh); delete_dense_tensor(&s);
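

A minimal usage sketch of the new mps_compress entry point, mirroring the call sequence in test_mps_compress; the wrapper name compress_example and the tolerance/bond-dimension values are illustrative assumptions, not part of the diff.

// Illustrative sketch (not part of the diff): compress an existing MPS in
// left-canonical mode, mirroring the sequence in 'test_mps_compress'.
// Tolerance 1e-4 and maximum virtual bond dimension 256 are example values.
static int compress_example(struct mps* mps)
{
	double norm;   // norm of the input state, obtained from the initial QR sweep
	double scale;  // scaling factor introduced by the truncated SVDs (close to 1 for mild truncation)
	struct trunc_info* info = aligned_calloc(MEM_DATA_ALIGN, mps->nsites, sizeof(struct trunc_info));

	const int ret = mps_compress(1e-4, 256, MPS_ORTHONORMAL_LEFT, mps, &norm, &scale, info);

	aligned_free(info);
	// on success (ret == 0), the MPS is left-orthonormal and normalized;
	// the input state is approximately 'norm * scale' times the compressed state
	return ret;
}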
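The 'transpose' flag of the new isometry helpers selects which Gram matrix is compared against the identity: with transpose == false the product Q^H Q is formed (orthonormal columns, as for the U factor of an SVD), with transpose == true the product Q Q^H (orthonormal rows, as for V^H). A small dense-tensor sketch with illustrative dimensions:

// Illustrative sketch (not part of the diff): a tall matrix with orthonormal
// columns satisfies Q^H Q = I but not Q Q^H = I.
struct dense_tensor q;
const long dim[2] = { 4, 2 };
allocate_dense_tensor(CT_DOUBLE_REAL, 2, dim, &q);
// ... fill 'q' with two orthonormal columns, e.g., the unit vectors e_0 and e_1 ...
bool cols_ok = dense_tensor_is_isometry(&q, 1e-13, false);  // tests Q^T Q == identity
bool rows_ok = dense_tensor_is_isometry(&q, 1e-13, true);   // tests Q Q^T == identity; false for a tall 'q'
delete_dense_tensor(&q);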