diff --git a/src/operator/local_op.c b/src/operator/local_op.c
index 8b5e111..b653662 100644
--- a/src/operator/local_op.c
+++ b/src/operator/local_op.c
@@ -50,54 +50,121 @@ void construct_local_operator(const struct local_op_ref* opics, const int nopics
 {
 	assert(nopics > 0);
-	// first summand
-	copy_dense_tensor(&opmap[opics[0].oid], op);
-	switch (op->dtype)
+	if (opics[0].oid == OID_NOP)
 	{
-		case CT_SINGLE_REAL:
+		// construct dummy identity operator
+		const long dim[2] = { 1, 1 };
+		allocate_dense_tensor(opmap[0].dtype, 2, dim, op);
+		dense_tensor_set_identity(op);
+		// scale by weight factor
+		switch (op->dtype)
 		{
-			const float* cmap = coeffmap;
-			scale_dense_tensor(&cmap[opics[0].cid], op);
-			// add the other summands
-			for (int i = 1; i < nopics; i++) {
-				dense_tensor_scalar_multiply_add(&cmap[opics[i].cid], &opmap[opics[i].oid], op);
+			case CT_SINGLE_REAL:
+			{
+				const float* cmap = coeffmap;
+				float weight = 0;
+				for (int i = 0; i < nopics; i++) {
+					// operators must be consistent
+					assert(opics[i].oid == OID_NOP);
+					weight += cmap[opics[i].cid];
+				}
+				scale_dense_tensor(&weight, op);
+				break;
 			}
-			break;
-		}
-		case CT_DOUBLE_REAL:
-		{
-			const double* cmap = coeffmap;
-			scale_dense_tensor(&cmap[opics[0].cid], op);
-			// add the other summands
-			for (int i = 1; i < nopics; i++) {
-				dense_tensor_scalar_multiply_add(&cmap[opics[i].cid], &opmap[opics[i].oid], op);
+			case CT_DOUBLE_REAL:
+			{
+				const double* cmap = coeffmap;
+				double weight = 0;
+				for (int i = 0; i < nopics; i++) {
+					// operators must be consistent
+					assert(opics[i].oid == OID_NOP);
+					weight += cmap[opics[i].cid];
+				}
+				scale_dense_tensor(&weight, op);
+				break;
 			}
-			break;
-		}
-		case CT_SINGLE_COMPLEX:
-		{
-			const scomplex* cmap = coeffmap;
-			scale_dense_tensor(&cmap[opics[0].cid], op);
-			// add the other summands
-			for (int i = 1; i < nopics; i++) {
-				dense_tensor_scalar_multiply_add(&cmap[opics[i].cid], &opmap[opics[i].oid], op);
+			case CT_SINGLE_COMPLEX:
+			{
+				const scomplex* cmap = coeffmap;
+				scomplex weight = 0;
+				for (int i = 0; i < nopics; i++) {
+					// operators must be consistent
+					assert(opics[i].oid == OID_NOP);
+					weight += cmap[opics[i].cid];
+				}
+				scale_dense_tensor(&weight, op);
+				break;
 			}
-			break;
-		}
-		case CT_DOUBLE_COMPLEX:
-		{
-			const dcomplex* cmap = coeffmap;
-			scale_dense_tensor(&cmap[opics[0].cid], op);
-			// add the other summands
-			for (int i = 1; i < nopics; i++) {
-				dense_tensor_scalar_multiply_add(&cmap[opics[i].cid], &opmap[opics[i].oid], op);
+			case CT_DOUBLE_COMPLEX:
+			{
+				const dcomplex* cmap = coeffmap;
+				dcomplex weight = 0;
+				for (int i = 0; i < nopics; i++) {
+					// operators must be consistent
+					assert(opics[i].oid == OID_NOP);
+					weight += cmap[opics[i].cid];
+				}
+				scale_dense_tensor(&weight, op);
+				break;
+			}
+			default:
+			{
+				// unknown data type
+				assert(false);
 			}
-			break;
 		}
-		default:
+	}
+	else
+	{
+		// first summand
+		copy_dense_tensor(&opmap[opics[0].oid], op);
+		switch (op->dtype)
 		{
-			// unknown data type
-			assert(false);
+			case CT_SINGLE_REAL:
+			{
+				const float* cmap = coeffmap;
+				scale_dense_tensor(&cmap[opics[0].cid], op);
+				// add the other summands
+				for (int i = 1; i < nopics; i++) {
+					dense_tensor_scalar_multiply_add(&cmap[opics[i].cid], &opmap[opics[i].oid], op);
+				}
+				break;
+			}
+			case CT_DOUBLE_REAL:
+			{
+				const double* cmap = coeffmap;
+				scale_dense_tensor(&cmap[opics[0].cid], op);
+				// add the other summands
+				for (int i = 1; i < nopics; i++) {
+					dense_tensor_scalar_multiply_add(&cmap[opics[i].cid], &opmap[opics[i].oid], op);
+				}
+				break;
+			}
+			case CT_SINGLE_COMPLEX:
+			{
+				const scomplex* cmap = coeffmap;
+				scale_dense_tensor(&cmap[opics[0].cid], op);
+				// add the other summands
+				for (int i = 1; i < nopics; i++) {
+					dense_tensor_scalar_multiply_add(&cmap[opics[i].cid], &opmap[opics[i].oid], op);
+				}
+				break;
+			}
+			case CT_DOUBLE_COMPLEX:
+			{
+				const dcomplex* cmap = coeffmap;
+				scale_dense_tensor(&cmap[opics[0].cid], op);
+				// add the other summands
+				for (int i = 1; i < nopics; i++) {
+					dense_tensor_scalar_multiply_add(&cmap[opics[i].cid], &opmap[opics[i].oid], op);
+				}
+				break;
+			}
+			default:
+			{
+				// unknown data type
+				assert(false);
+			}
 		}
 	}
 }
diff --git a/src/operator/local_op.h b/src/operator/local_op.h
index 8cd157e..868c380 100644
--- a/src/operator/local_op.h
+++ b/src/operator/local_op.h
@@ -12,7 +12,8 @@
 ///
 enum
 {
-	OID_IDENTITY = 0, //!< index corresponding to identity operation
+	OID_NOP = -1,     //!< index corresponding to no operation
+	OID_IDENTITY = 0, //!< index corresponding to identity operation
 };
diff --git a/src/operator/ttno.c b/src/operator/ttno.c
index f754d57..039f8c4 100644
--- a/src/operator/ttno.c
+++ b/src/operator/ttno.c
@@ -12,26 +12,33 @@
 ///
 void ttno_from_assembly(const struct ttno_assembly* assembly, struct ttno* ttno)
 {
-	assert(assembly->graph.nsites >= 1);
+	assert(assembly->graph.nsites_physical >= 1);
+	assert(assembly->graph.nsites_branching >= 0);
 	assert(assembly->d >= 1);
 	const long d = assembly->d;
 	ttno->d = d;
-	ttno->nsites = assembly->graph.nsites;
+	ttno->nsites_physical = assembly->graph.nsites_physical;
+	ttno->nsites_branching = assembly->graph.nsites_branching;
 	assert(coefficient_map_is_valid(assembly->dtype, assembly->coeffmap));
+	// overall number of sites
+	const int nsites = assembly->graph.nsites_physical + assembly->graph.nsites_branching;
+
 	// tree topology
 	copy_abstract_graph(&assembly->graph.topology, &ttno->topology);
 	ttno->qsite = ct_malloc(d * sizeof(qnumber));
 	memcpy(ttno->qsite, assembly->qsite, d * sizeof(qnumber));
-	ttno->a = ct_calloc(assembly->graph.nsites, sizeof(struct block_sparse_tensor));
+	ttno->a = ct_calloc(nsites, sizeof(struct block_sparse_tensor));
-	for (int l = 0; l < assembly->graph.nsites; l++)
+	for (int l = 0; l < nsites; l++)
 	{
+		const int offset_phys = (l < assembly->graph.nsites_physical ? 2 : 0);
+
 		// accumulate entries in a dense tensor first
-		const int ndim_a_loc = assembly->graph.topology.num_neighbors[l] + 2;
+		const int ndim_a_loc = assembly->graph.topology.num_neighbors[l] + offset_phys;
 		long* dim_a_loc = ct_malloc(ndim_a_loc * sizeof(long));
 		for (int i = 0; i < ndim_a_loc; i++) {
 			dim_a_loc[i] = d;
@@ -45,11 +52,11 @@ void ttno_from_assembly(const struct ttno_assembly* assembly, struct ttno* ttno)
 				assert(assembly->graph.topology.neighbor_map[l][i - 1] < k);
 			}
 			if (k < l) {
-				dim_a_loc[i] = assembly->graph.num_verts[k*assembly->graph.nsites + l];
+				dim_a_loc[i] = assembly->graph.num_verts[k*nsites + l];
 			}
 			else {
-				dim_a_loc[i + 2] = assembly->graph.num_verts[l*assembly->graph.nsites + k];
-				stride_trail *= dim_a_loc[i + 2];
+				dim_a_loc[i + offset_phys] = assembly->graph.num_verts[l*nsites + k];
+				stride_trail *= dim_a_loc[i + offset_phys];
 			}
 		}
 		struct dense_tensor a_loc;
@@ -64,7 +71,8 @@ void ttno_from_assembly(const struct ttno_assembly* assembly, struct ttno* ttno)
 			construct_local_operator(edge->opics, edge->nopics, assembly->opmap, assembly->coeffmap, &op);
 			assert(op.ndim == 2);
-			assert(op.dim[0] == d && op.dim[1] == d);
+			assert(op.dim[0] == op.dim[1]);
+			assert(op.dim[0] == (l < assembly->graph.nsites_physical ? d : 1));
 			assert(op.dtype == a_loc.dtype);
 			// add entries of local operator 'op' to 'a_loc' (supporting multiple hyperedges between same nodes)
@@ -72,8 +80,8 @@ void ttno_from_assembly(const struct ttno_assembly* assembly, struct ttno* ttno)
 			for (int i = 0; i < edge->order; i++) {
 				int k = assembly->graph.topology.neighbor_map[l][i];
 				assert(k != l);
-				index_start[k < l ? i : i + 2] = edge->vids[i];
-				assert(0 <= edge->vids[i] && edge->vids[i] < a_loc.dim[k < l ? i : i + 2]);
+				index_start[k < l ? i : i + offset_phys] = edge->vids[i];
+				assert(0 <= edge->vids[i] && edge->vids[i] < a_loc.dim[k < l ? i : i + offset_phys]);
 			}
 			long offset = tensor_index_to_offset(a_loc.ndim, a_loc.dim, index_start);
 			ct_free(index_start);
@@ -83,7 +91,7 @@ void ttno_from_assembly(const struct ttno_assembly* assembly, struct ttno* ttno)
 				{
 					float* al_data = a_loc.data;
 					const float* op_data = op.data;
-					for (long j = 0; j < d*d; j++, offset += stride_trail)
+					for (long j = 0; j < op.dim[0]*op.dim[1]; j++, offset += stride_trail)
 					{
 						al_data[offset] += op_data[j];
 					}
@@ -93,7 +101,7 @@ void ttno_from_assembly(const struct ttno_assembly* assembly, struct ttno* ttno)
 				{
 					double* al_data = a_loc.data;
 					const double* op_data = op.data;
-					for (long j = 0; j < d*d; j++, offset += stride_trail)
+					for (long j = 0; j < op.dim[0]*op.dim[1]; j++, offset += stride_trail)
 					{
 						al_data[offset] += op_data[j];
 					}
@@ -103,7 +111,7 @@ void ttno_from_assembly(const struct ttno_assembly* assembly, struct ttno* ttno)
 				{
 					scomplex* al_data = a_loc.data;
 					const scomplex* op_data = op.data;
-					for (long j = 0; j < d*d; j++, offset += stride_trail)
+					for (long j = 0; j < op.dim[0]*op.dim[1]; j++, offset += stride_trail)
 					{
 						al_data[offset] += op_data[j];
 					}
@@ -113,7 +121,7 @@ void ttno_from_assembly(const struct ttno_assembly* assembly, struct ttno* ttno)
 				{
 					dcomplex* al_data = a_loc.data;
 					const dcomplex* op_data = op.data;
-					for (long j = 0; j < d*d; j++, offset += stride_trail)
+					for (long j = 0; j < op.dim[0]*op.dim[1]; j++, offset += stride_trail)
 					{
 						al_data[offset] += op_data[j];
 					}
@@ -130,14 +138,14 @@ void ttno_from_assembly(const struct ttno_assembly* assembly, struct ttno* ttno)
 		}
 		// note: entries not adhering to the quantum number sparsity pattern are ignored
-		qnumber** qnums = ct_calloc(assembly->graph.topology.num_neighbors[l] + 2, sizeof(qnumber*));
+		qnumber** qnums = ct_calloc(assembly->graph.topology.num_neighbors[l] + offset_phys, sizeof(qnumber*));
 		// virtual bond axis is oriented towards smaller site index
-		enum tensor_axis_direction* axis_dir = ct_malloc((assembly->graph.topology.num_neighbors[l] + 2) * sizeof(enum tensor_axis_direction));
+		enum tensor_axis_direction* axis_dir = ct_malloc((assembly->graph.topology.num_neighbors[l] + offset_phys) * sizeof(enum tensor_axis_direction));
 		for (int i = 0; i < assembly->graph.topology.num_neighbors[l]; i++)
 		{
 			int k = assembly->graph.topology.neighbor_map[l][i];
 			if (k < l) {
-				const int iv = k*assembly->graph.nsites + l;
+				const int iv = k*nsites + l;
 				qnums[i] = ct_malloc(assembly->graph.num_verts[iv] * sizeof(qnumber));
 				for (int n = 0; n < assembly->graph.num_verts[iv]; n++) {
@@ -146,35 +154,38 @@ void ttno_from_assembly(const struct ttno_assembly* assembly, struct ttno* ttno)
 				axis_dir[i] = TENSOR_AXIS_OUT;
 			}
 			else {
-				const int iv = l*assembly->graph.nsites + k;
-				qnums[i + 2] = ct_malloc(assembly->graph.num_verts[iv] * sizeof(qnumber));
+				const int iv = l*nsites + k;
+				qnums[i + offset_phys] = ct_malloc(assembly->graph.num_verts[iv] * sizeof(qnumber));
 				for (int n = 0; n < assembly->graph.num_verts[iv]; n++) {
-					qnums[i + 2][n] = assembly->graph.verts[iv][n].qnum;
+					qnums[i + offset_phys][n] = assembly->graph.verts[iv][n].qnum;
 				}
-				axis_dir[i + 2] = TENSOR_AXIS_IN;
+				axis_dir[i + offset_phys] = TENSOR_AXIS_IN;
 			}
 		}
 		// physical axes at current site
-		bool site_info_set = false;
-		for (int i = 0; i < assembly->graph.topology.num_neighbors[l] + 2; i++)
+		if (l < assembly->graph.nsites_physical)
 		{
-			if (qnums[i] == NULL) {
-				assert(qnums[i + 1] == NULL);
-				qnums[i] = ttno->qsite;
-				qnums[i + 1] = ttno->qsite;
-				axis_dir[i] = TENSOR_AXIS_OUT;
-				axis_dir[i + 1] = TENSOR_AXIS_IN;
-				site_info_set = true;
-				break;
+			bool site_info_set = false;
+			for (int i = 0; i < assembly->graph.topology.num_neighbors[l] + offset_phys; i++)
+			{
+				if (qnums[i] == NULL) {
+					assert(qnums[i + 1] == NULL);
+					qnums[i] = ttno->qsite;
+					qnums[i + 1] = ttno->qsite;
+					axis_dir[i] = TENSOR_AXIS_OUT;
+					axis_dir[i + 1] = TENSOR_AXIS_IN;
+					site_info_set = true;
+					break;
+				}
 			}
+			assert(site_info_set);
 		}
-		assert(site_info_set);
 		dense_to_block_sparse_tensor(&a_loc, axis_dir, (const qnumber**)qnums, &ttno->a[l]);
 		for (int i = 0; i < assembly->graph.topology.num_neighbors[l]; i++)
 		{
 			int k = assembly->graph.topology.neighbor_map[l][i];
-			ct_free(qnums[k < l ? i : i + 2]);
+			ct_free(qnums[k < l ? i : i + offset_phys]);
 		}
 		ct_free(qnums);
 		ct_free(axis_dir);
@@ -199,7 +210,10 @@ void ttno_from_assembly(const struct ttno_assembly* assembly, struct ttno* ttno)
 ///
 void delete_ttno(struct ttno* ttno)
 {
-	for (int l = 0; l < ttno->nsites; l++)
+	// overall number of sites
+	const int nsites = ttno->nsites_physical + ttno->nsites_branching;
+
+	for (int l = 0; l < nsites; l++)
 	{
 		delete_block_sparse_tensor(&ttno->a[l]);
 	}
@@ -208,7 +222,8 @@ void delete_ttno(struct ttno* ttno)
 	delete_abstract_graph(&ttno->topology);
-	ttno->nsites = 0;
+	ttno->nsites_physical = 0;
+	ttno->nsites_branching = 0;
 	ct_free(ttno->qsite);
 	ttno->qsite = NULL;
@@ -222,8 +237,13 @@ void delete_ttno(struct ttno* ttno)
 ///
 void ttno_tensor_get_axis_desc(const struct ttno* ttno, const int i_site, struct ttno_tensor_axis_desc* desc)
 {
-	assert(0 <= i_site && i_site < ttno->nsites);
-	assert(ttno->a[i_site].ndim == ttno->topology.num_neighbors[i_site] + 2);
+	// overall number of sites
+	const int nsites = ttno->nsites_physical + ttno->nsites_branching;
+
+	const int offset_phys = (i_site < ttno->nsites_physical ? 2 : 0);
+
+	assert(0 <= i_site && i_site < nsites);
+	assert(ttno->a[i_site].ndim == ttno->topology.num_neighbors[i_site] + offset_phys);
 	// set to default type
 	for (int i = 0; i < ttno->a[i_site].ndim; i++) {
@@ -238,23 +258,26 @@ void ttno_tensor_get_axis_desc(const struct ttno* ttno, const int i_site, struct
 		}
 		int k = ttno->topology.neighbor_map[i_site][i];
 		assert(k != i_site);
-		desc[k < i_site ? i : i + 2].type = TTNO_TENSOR_AXIS_VIRTUAL;
-		desc[k < i_site ? i : i + 2].index = k;
+		desc[k < i_site ? i : i + offset_phys].type = TTNO_TENSOR_AXIS_VIRTUAL;
+		desc[k < i_site ? i : i + offset_phys].index = k;
 	}
 	// physical axes at current site
-	bool site_info_set = false;
-	for (int i = 0; i < ttno->a[i_site].ndim - 1; i++)
+	if (i_site < ttno->nsites_physical)
 	{
-		if (desc[i].type == TTNO_TENSOR_AXIS_PHYS_OUT) {
-			assert(desc[i + 1].type == TTNO_TENSOR_AXIS_PHYS_OUT);
-			desc[i + 1].type = TTNO_TENSOR_AXIS_PHYS_IN;
-			desc[i    ].index = i_site;
-			desc[i + 1].index = i_site;
-			site_info_set = true;
-			break;
+		bool site_info_set = false;
+		for (int i = 0; i < ttno->a[i_site].ndim - 1; i++)
+		{
+			if (desc[i].type == TTNO_TENSOR_AXIS_PHYS_OUT) {
+				assert(desc[i + 1].type == TTNO_TENSOR_AXIS_PHYS_OUT);
+				desc[i + 1].type = TTNO_TENSOR_AXIS_PHYS_IN;
+				desc[i    ].index = i_site;
+				desc[i + 1].index = i_site;
+				site_info_set = true;
+				break;
+			}
 		}
+		assert(site_info_set);
 	}
-	assert(site_info_set);
 }
@@ -264,15 +287,21 @@ void ttno_tensor_get_axis_desc(const struct ttno* ttno, const int i_site, struct
 ///
 bool ttno_is_consistent(const struct ttno* ttno)
 {
-	if (ttno->nsites <= 0) {
+	if (ttno->nsites_physical <= 0) {
 		return false;
 	}
+	if (ttno->nsites_branching < 0) {
+		return false;
+	}
+	// overall number of sites
+	const int nsites = ttno->nsites_physical + ttno->nsites_branching;
+
 	if (ttno->d <= 0) {
 		return false;
 	}
 	// topology
-	if (ttno->topology.num_nodes != ttno->nsites) {
+	if (ttno->topology.num_nodes != nsites) {
 		return false;
 	}
 	if (!abstract_graph_is_consistent(&ttno->topology)) {
@@ -283,16 +312,18 @@ bool ttno_is_consistent(const struct ttno* ttno)
 		return false;
 	}
-	struct ttno_tensor_axis_desc** axis_desc = ct_malloc(ttno->nsites * sizeof(struct ttno_tensor_axis_desc*));
-	for (int l = 0; l < ttno->nsites; l++)
+	struct ttno_tensor_axis_desc** axis_desc = ct_malloc(nsites * sizeof(struct ttno_tensor_axis_desc*));
+	for (int l = 0; l < nsites; l++)
 	{
 		axis_desc[l] = ct_malloc(ttno->a[l].ndim * sizeof(struct ttno_tensor_axis_desc));
 		ttno_tensor_get_axis_desc(ttno, l, axis_desc[l]);
 	}
-	for (int l = 0; l < ttno->nsites; l++)
+	for (int l = 0; l < nsites; l++)
 	{
-		if (ttno->a[l].ndim != ttno->topology.num_neighbors[l] + 2) {
+		const int offset_phys = (l < ttno->nsites_physical ? 2 : 0);
+
+		if (ttno->a[l].ndim != ttno->topology.num_neighbors[l] + offset_phys) {
 			return false;
 		}
@@ -358,7 +389,7 @@ bool ttno_is_consistent(const struct ttno* ttno)
 		}
 	}
-	for (int l = 0; l < ttno->nsites; l++)
+	for (int l = 0; l < nsites; l++)
 	{
 		ct_free(axis_desc[l]);
 	}
@@ -572,11 +603,15 @@ static void ttno_contract_subtree(const struct ttno* ttno, const int i_site, con
 ///
 void ttno_to_matrix(const struct ttno* ttno, struct block_sparse_tensor* mat)
 {
-	assert(ttno->nsites >= 1);
+	assert(ttno->nsites_physical >= 1);
+	assert(ttno->nsites_branching >= 0);
+
+	// overall number of sites
+	const int nsites = ttno->nsites_physical + ttno->nsites_branching;
 	// select site with maximum number of neighbors as root for contraction
 	int i_root = 0;
-	for (int l = 1; l < ttno->nsites; l++) {
+	for (int l = 1; l < nsites; l++) {
 		if (ttno->topology.num_neighbors[l] > ttno->topology.num_neighbors[i_root]) {
 			i_root = l;
 		}
@@ -586,8 +621,8 @@ void ttno_to_matrix(const struct ttno* ttno, struct block_sparse_tensor* mat)
 	// set parent index to -1 for root node
 	struct ttno_contracted_subtree contracted;
 	ttno_contract_subtree(ttno, i_root, -1, &contracted);
-	assert(contracted.tensor.ndim == 2 * ttno->nsites);
-	for (int l = 0; l < ttno->nsites; l++) {
+	assert(contracted.tensor.ndim == 2 * ttno->nsites_physical);
+	for (int l = 0; l < ttno->nsites_physical; l++) {
 		// sites must be ordered after subtree contraction
 		assert(contracted.axis_desc[2*l    ].index == l && contracted.axis_desc[2*l    ].type == TTNO_TENSOR_AXIS_PHYS_OUT);
 		assert(contracted.axis_desc[2*l + 1].index == l && contracted.axis_desc[2*l + 1].type == TTNO_TENSOR_AXIS_PHYS_IN);
diff --git a/src/operator/ttno.h b/src/operator/ttno.h
index 6801ca7..f30fd35 100644
--- a/src/operator/ttno.h
+++ b/src/operator/ttno.h
@@ -31,10 +31,11 @@ struct ttno_assembly
 struct ttno
 {
 	struct block_sparse_tensor* a;   //!< tensors associated with sites, with interleaved physical and virtual bond dimensions (ordered by site indices)
-	struct abstract_graph topology;  //!< logical tree topology; nodes correspond to physical sites
+	struct abstract_graph topology;  //!< logical tree topology; nodes correspond to physical and branching sites
 	qnumber* qsite;                  //!< physical quantum numbers at each site
 	long d;                          //!< local physical dimension of each site
-	int nsites;                      //!< number of sites
+	int nsites_physical;             //!< number of physical sites
+	int nsites_branching;            //!< number of branching sites
 };
diff --git a/src/operator/ttno_graph.c b/src/operator/ttno_graph.c
index ca0369c..a702a7f 100644
--- a/src/operator/ttno_graph.c
+++ b/src/operator/ttno_graph.c
@@ -312,10 +312,12 @@ static int construct_vid_index_map(const struct abstract_graph* topology, const
 ///
 /// \brief Construct an operator cluster assembly from a list of operator chains.
 ///
-static int cluster_assembly_from_opchains(const struct op_chain* chains, const int nchains, const struct abstract_graph* topology, struct op_cluster_assembly* assembly, int* restrict cids)
+static int cluster_assembly_from_opchains(const struct op_chain* chains, const int nchains, const int nsites_physical, const struct abstract_graph* topology, struct op_cluster_assembly* assembly, int* restrict cids)
 {
 	// need at least one site
 	assert(topology->num_nodes >= 1);
+	// number of physical sites must be smaller or equal to overall number of sites
+	assert(nsites_physical <= topology->num_nodes);
 	// expecting a tree
 	assert(abstract_graph_is_connected_tree(topology));
 	// list of operator chains cannot be empty
@@ -370,9 +372,15 @@ static int cluster_assembly_from_opchains(const struct op_chain* chains, const i
 			continue;
 		}
-		// pad identities
+		assert(chains[k].length <= nsites_physical);
+
+		// pad identities and no-ops
 		struct op_chain pad_chain;
 		op_chain_pad_identities(&chains[k], nsites, &pad_chain);
+		for (int l = nsites_physical; l < nsites; l++) {
+			assert(pad_chain.qnums[l] == 0);
+			pad_chain.oids[l] = OID_NOP;
+		}
 		assert(pad_chain.length == nsites);
 		// require zero leading and trailing quantum numbers
 		assert(pad_chain.qnums[0] == 0 && pad_chain.qnums[nsites] == 0);
@@ -822,10 +830,11 @@ static void enumerate_site_distance_tuples(const struct abstract_graph* topology
 ///
 /// \brief Construct a TTNO operator graph from a list of operator chains.
 ///
-int ttno_graph_from_opchains(const struct op_chain* chains, const int nchains, const struct abstract_graph* topology, struct ttno_graph* ttno_graph)
+int ttno_graph_from_opchains(const struct op_chain* chains, const int nchains, const int nsites_physical, const struct abstract_graph* topology, struct ttno_graph* ttno_graph)
 {
 	// need at least one site
 	assert(topology->num_nodes > 0);
+	assert(nsites_physical > 0 && nsites_physical <= topology->num_nodes);
 	// expecting a tree
 	assert(abstract_graph_is_connected_tree(topology));
 	// list of operator chains cannot be empty
@@ -836,7 +845,7 @@ int ttno_graph_from_opchains(const struct op_chain* chains, const int nchains, c
 	// initial operator cluster assembly
 	struct op_cluster_assembly assembly;
 	int* cids_next = ct_malloc(nchains * sizeof(int));
-	if (cluster_assembly_from_opchains(chains, nchains, topology, &assembly, cids_next) < 0) {
+	if (cluster_assembly_from_opchains(chains, nchains, nsites_physical, topology, &assembly, cids_next) < 0) {
 		return -1;
 	}
@@ -859,7 +868,8 @@ int ttno_graph_from_opchains(const struct op_chain* chains, const int nchains, c
 	ttno_graph->verts = ct_calloc(nsites*nsites, sizeof(struct ttno_graph_vertex*));
 	ttno_graph->num_edges = ct_calloc(nsites, sizeof(int));
 	ttno_graph->num_verts = ct_calloc(nsites*nsites, sizeof(int));
-	ttno_graph->nsites = nsites;
+	ttno_graph->nsites_physical = nsites_physical;
+	ttno_graph->nsites_branching = nsites - nsites_physical;
 	// select site with maximum number of neighbors as root
 	int i_root = 0;
@@ -1137,8 +1147,10 @@ int ttno_graph_from_opchains(const struct op_chain* chains, const int nchains, c
 ///
 void delete_ttno_graph(struct ttno_graph* graph)
 {
+	const int nsites = graph->nsites_physical + graph->nsites_branching;
+
 	// edges
-	for (int l = 0; l < graph->nsites; l++)
+	for (int l = 0; l < nsites; l++)
 	{
 		for (int i = 0; i < graph->num_edges[l]; i++)
 		{
@@ -1150,7 +1162,7 @@ void delete_ttno_graph(struct ttno_graph* graph)
 	ct_free(graph->num_edges);
 	// vertices
-	for (int l = 0; l < graph->nsites; l++)
+	for (int l = 0; l < nsites; l++)
 	{
 		for (int n = 0; n < graph->topology.num_neighbors[l]; n++)
 		{
@@ -1158,7 +1170,7 @@ void delete_ttno_graph(struct ttno_graph* graph)
 			if (k > l) {
 				continue;
 			}
-			const int iv = edge_to_vertex_index(graph->nsites, k, l);
+			const int iv = edge_to_vertex_index(nsites, k, l);
 			for (int i = 0; i < graph->num_verts[iv]; i++) {
 				delete_ttno_graph_vertex(&graph->verts[iv][i]);
 			}
@@ -1178,12 +1190,17 @@ void delete_ttno_graph(struct ttno_graph* graph)
 ///
 bool ttno_graph_is_consistent(const struct ttno_graph* graph)
 {
-	if (graph->nsites <= 0) {
+	if (graph->nsites_physical <= 0) {
 		return false;
 	}
+	if (graph->nsites_branching < 0) {
+		return false;
+	}
+	// overall number of sites
+	const int nsites = graph->nsites_physical + graph->nsites_branching;
 	// topology
-	if (graph->topology.num_nodes != graph->nsites) {
+	if (graph->topology.num_nodes != nsites) {
 		return false;
 	}
 	if (!abstract_graph_is_consistent(&graph->topology)) {
@@ -1195,11 +1212,11 @@ bool ttno_graph_is_consistent(const struct ttno_graph* graph)
 	}
 	// compatibility of vertex numbers with tree topology
-	for (int l = 0; l < graph->nsites; l++)
+	for (int l = 0; l < nsites; l++)
 	{
 		// entries in 'num_verts' must be zero for k >= l
-		for (int k = l; k < graph->nsites; k++) {
-			if (graph->num_verts[k*graph->nsites + l] != 0) {
+		for (int k = l; k < nsites; k++) {
+			if (graph->num_verts[k*nsites + l] != 0) {
 				return false;
 			}
 		}
@@ -1213,7 +1230,7 @@ bool ttno_graph_is_consistent(const struct ttno_graph* graph)
 					break;
 				}
 			}
-			const int iv = edge_to_vertex_index(graph->nsites, k, l);
+			const int iv = edge_to_vertex_index(nsites, k, l);
 			if (is_neighbor) {
 				if (graph->num_verts[iv] <= 0) {
 					return false;
@@ -1228,7 +1245,7 @@ bool ttno_graph_is_consistent(const struct ttno_graph* graph)
 	}
 	// edges indexed by a vertex must point back to same vertex
-	for (int l = 0; l < graph->nsites; l++)
+	for (int l = 0; l < nsites; l++)
 	{
 		for (int n = 0; n < graph->topology.num_neighbors[l]; n++)
 		{
@@ -1244,7 +1261,7 @@ bool ttno_graph_is_consistent(const struct ttno_graph* graph)
 			}
 			assert(m < graph->topology.num_neighbors[k]);
-			const int iv = edge_to_vertex_index(graph->nsites, k, l);
+			const int iv = edge_to_vertex_index(nsites, k, l);
 			for (int i = 0; i < graph->num_verts[iv]; i++)
 			{
@@ -1274,7 +1291,7 @@ bool ttno_graph_is_consistent(const struct ttno_graph* graph)
 	}
 	// vertices indexed by an edge must point back to same edge
-	for (int l = 0; l < graph->nsites; l++)
+	for (int l = 0; l < nsites; l++)
 	{
 		for (int j = 0; j < graph->num_edges[l]; j++)
 		{
@@ -1285,7 +1302,7 @@ bool ttno_graph_is_consistent(const struct ttno_graph* graph)
 			for (int n = 0; n < edge->order; n++)
 			{
 				int k = graph->topology.neighbor_map[l][n];
-				int iv = edge_to_vertex_index(graph->nsites, k, l);
+				int iv = edge_to_vertex_index(nsites, k, l);
 				const struct ttno_graph_vertex* vertex = &graph->verts[iv][edge->vids[n]];
 				bool edge_ref = false;
 				for (int i = 0; i < vertex->num_edges[l < k ? 0 : 1]; i++) {
@@ -1370,6 +1387,7 @@ static int compare_indexed_site_index(const void* a, const void* b)
 static void ttno_graph_contract_subtree(const struct ttno_graph* graph, const int i_site, const int i_parent, const struct dense_tensor* opmap, const void* coeffmap, struct ttno_graph_contracted_subtree* contracted)
 {
 	assert(i_site != i_parent);
+	assert(graph->topology.num_nodes == graph->nsites_physical + graph->nsites_branching);
 	// local dimension
 	const long d = opmap[0].dim[0];
@@ -1424,7 +1442,8 @@ static void ttno_graph_contract_subtree(const struct ttno_graph* graph, const in
 			struct dense_tensor op;
 			construct_local_operator(edge->opics, edge->nopics, opmap, coeffmap, &op);
 			assert(op.ndim == 2);
-			assert(op.dim[0] == d && op.dim[1] == d);
+			assert(op.dim[0] == op.dim[1]);
+			assert(op.dim[0] == (i_site < graph->nsites_physical ? d : 1));
 			// compute Kronecker products with child blocks indexed by current edge
 			for (int n = 0; n < graph->topology.num_neighbors[i_site]; n++)
@@ -1454,7 +1473,7 @@ static void ttno_graph_contract_subtree(const struct ttno_graph* graph, const in
 	}
 	else  // not root node
 	{
-		const int iv = edge_to_vertex_index(graph->nsites, i_site, i_parent);
+		const int iv = edge_to_vertex_index(graph->topology.num_nodes, i_site, i_parent);
 		assert(graph->verts[iv] != NULL);
 		contracted->nblocks = graph->num_verts[iv];
@@ -1476,7 +1495,8 @@ static void ttno_graph_contract_subtree(const struct ttno_graph* graph, const in
 				struct dense_tensor op;
 				construct_local_operator(edge->opics, edge->nopics, opmap, coeffmap, &op);
 				assert(op.ndim == 2);
-				assert(op.dim[0] == d && op.dim[1] == d);
+				assert(op.dim[0] == op.dim[1]);
+				assert(op.dim[0] == (i_site < graph->nsites_physical ? d : 1));
 				// compute Kronecker products with child blocks indexed by current edge
 				for (int n = 0; n < graph->topology.num_neighbors[i_site]; n++)
@@ -1528,12 +1548,9 @@ static void ttno_graph_contract_subtree(const struct ttno_graph* graph, const in
 	}
 	qsort(indexed_sites, contracted->nsites, sizeof(struct indexed_site_index), compare_indexed_site_index);
 	int* perm = ct_malloc(2 * contracted->nsites * sizeof(int));
-	for (int j = 0; j < contracted->nsites; j++)
-	{
-		contracted->i_sites[j] = indexed_sites[j].i_site;
+	for (int j = 0; j < contracted->nsites; j++) {
 		perm[j] = indexed_sites[j].index;
 	}
-	ct_free(indexed_sites);
 	// skip permutation operations in case of an identity permutation
 	bool is_identity_perm = true;
 	for (int j = 0; j < contracted->nsites; j++) {
@@ -1549,8 +1566,9 @@ static void ttno_graph_contract_subtree(const struct ttno_graph* graph, const in
 			perm[contracted->nsites + j] = contracted->nsites + perm[j];
 		}
 		long* dim = ct_malloc(2 * contracted->nsites * sizeof(long));
-		for (int j = 0; j < 2 * contracted->nsites; j++) {
-			dim[j] = d;
+		for (int j = 0; j < contracted->nsites; j++) {
+			dim[j] = (contracted->i_sites[j] < graph->nsites_physical ? d : 1);
+			dim[contracted->nsites + j] = dim[j];
 		}
 		for (int i = 0; i < contracted->nblocks; i++)
 		{
@@ -1569,6 +1587,11 @@ static void ttno_graph_contract_subtree(const struct ttno_graph* graph, const in
 		ct_free(dim);
 	}
 	ct_free(perm);
+	// updated site enumeration after permutation
+	for (int j = 0; j < contracted->nsites; j++) {
+		contracted->i_sites[j] = indexed_sites[j].i_site;
+	}
+	ct_free(indexed_sites);
 }
@@ -1578,12 +1601,16 @@ static void ttno_graph_contract_subtree(const struct ttno_graph* graph, const in
 ///
 void ttno_graph_to_matrix(const struct ttno_graph* graph, const struct dense_tensor* opmap, const void* coeffmap, struct dense_tensor* mat)
 {
-	assert(graph->nsites >= 1);
+	assert(graph->nsites_physical >= 1);
 	assert(coefficient_map_is_valid(opmap[0].dtype, coeffmap));
+	// overall number of sites
+	const int nsites = graph->nsites_physical + graph->nsites_branching;
+	assert(graph->topology.num_nodes == nsites);
+
 	// select site with maximum number of neighbors as root for contraction
 	int i_root = 0;
-	for (int l = 1; l < graph->nsites; l++) {
+	for (int l = 1; l < nsites; l++) {
 		if (graph->topology.num_neighbors[l] > graph->topology.num_neighbors[i_root]) {
 			i_root = l;
 		}
@@ -1593,7 +1620,7 @@ void ttno_graph_to_matrix(const struct ttno_graph* graph, const struct dense_ten
 	// set parent index to -1 for root node
 	struct ttno_graph_contracted_subtree contracted;
 	ttno_graph_contract_subtree(graph, i_root, -1, opmap, coeffmap, &contracted);
-	assert(contracted.nsites == graph->nsites);
+	assert(contracted.nsites == nsites);
 	assert(contracted.nblocks == 1);
 	for (int l = 0; l < contracted.nsites; l++) {
 		assert(contracted.i_sites[l] == l);
diff --git a/src/operator/ttno_graph.h b/src/operator/ttno_graph.h
index 8ccee5d..4439d63 100644
--- a/src/operator/ttno_graph.h
+++ b/src/operator/ttno_graph.h
@@ -44,17 +44,20 @@ struct ttno_graph_hyperedge
 ///
 /// \brief TTNO graph internal data structure for generating TTNO representations.
 ///
+/// Sites are enumerated as physical sites first, then branching sites.
+///
 struct ttno_graph
 {
-	struct abstract_graph topology;       //!< logical tree topology; nodes correspond to physical sites
-	struct ttno_graph_hyperedge** edges;  //!< list of hyperedges for each physical site
+	struct abstract_graph topology;       //!< logical tree topology; nodes correspond to physical and branching sites
+	struct ttno_graph_hyperedge** edges;  //!< list of hyperedges for each physical and branching site
 	struct ttno_graph_vertex** verts;     //!< list of vertices for each virtual bond, indexed by corresponding hyperedge site indices (i, j) with i < j
 	int* num_edges;                       //!< number of edges for each site
 	int* num_verts;                       //!< number of vertices for each virtual bond, i.e., virtual bond dimensions, indexed by corresponding hyperedge site indices (i, j) with i < j
-	int nsites;                           //!< number of sites
+	int nsites_physical;                  //!< number of physical sites
+	int nsites_branching;                 //!< number of branching sites
 };
-int ttno_graph_from_opchains(const struct op_chain* chains, const int nchains, const struct abstract_graph* topology, struct ttno_graph* ttno_graph);
+int ttno_graph_from_opchains(const struct op_chain* chains, const int nchains, const int nsites_physical, const struct abstract_graph* topology, struct ttno_graph* ttno_graph);
 void delete_ttno_graph(struct ttno_graph* graph);
diff --git a/test/operator/data/test_ttno_from_assembly.hdf5 b/test/operator/data/test_ttno_from_assembly.hdf5
index 9315f96..be85cb3 100644
Binary files a/test/operator/data/test_ttno_from_assembly.hdf5 and b/test/operator/data/test_ttno_from_assembly.hdf5 differ
diff --git a/test/operator/data/test_ttno_graph_from_opchains.hdf5 b/test/operator/data/test_ttno_graph_from_opchains.hdf5
index e709ad1..8008a52 100644
Binary files a/test/operator/data/test_ttno_graph_from_opchains.hdf5 and b/test/operator/data/test_ttno_graph_from_opchains.hdf5 differ
diff --git a/test/operator/test_ttno.c b/test/operator/test_ttno.c
index 91f14c7..d4e4f52 100644
--- a/test/operator/test_ttno.c
+++ b/test/operator/test_ttno.c
@@ -13,8 +13,9 @@ char* test_ttno_from_assembly()
 		return "'H5Fopen' in test_ttno_from_assembly failed";
 	}
-	// number of lattice sites
-	const int nsites = 7;
+	// number of physical and branching lattice sites
+	const int nsites_physical = 6;
+	const int nsites_branching = 2;
 	// local physical dimension
 	const long d = 3;
@@ -22,103 +23,110 @@ char* test_ttno_from_assembly()
 	// tree topology:
 	//
-	//     4     6
+	//     4     0
 	//      \   /
 	//       \ /
-	//        0
+	//        6
 	//        |
 	//        |
-	//  2 --- 3 --- 1
+	//  2 --- 5 --- 1 --- 7
 	//        |
 	//        |
-	//        5
+	//        3
 	//
-	int neigh0[] = { 3, 4, 6 };
-	int neigh1[] = { 3 };
-	int neigh2[] = { 3 };
-	int neigh3[] = { 0, 1, 2, 5 };
-	int neigh4[] = { 0 };
-	int neigh5[] = { 3 };
-	int neigh6[] = { 0 };
-	int* neighbor_map[7] = {
-		neigh0, neigh1, neigh2, neigh3, neigh4, neigh5, neigh6,
+	int neigh0[] = { 6 };
+	int neigh1[] = { 5, 7 };
+	int neigh2[] = { 5 };
+	int neigh3[] = { 5 };
+	int neigh4[] = { 6 };
+	int neigh5[] = { 1, 2, 3, 6 };
+	int neigh6[] = { 0, 4, 5 };
+	int neigh7[] = { 1 };
+	int* neighbor_map[8] = {
+		neigh0, neigh1, neigh2, neigh3, neigh4, neigh5, neigh6, neigh7,
 	};
-	int num_neighbors[7] = {
-		ARRLEN(neigh0), ARRLEN(neigh1), ARRLEN(neigh2), ARRLEN(neigh3), ARRLEN(neigh4), ARRLEN(neigh5), ARRLEN(neigh6),
+	int num_neighbors[8] = {
+		ARRLEN(neigh0), ARRLEN(neigh1), ARRLEN(neigh2), ARRLEN(neigh3), ARRLEN(neigh4), ARRLEN(neigh5), ARRLEN(neigh6), ARRLEN(neigh7),
 	};
 	struct abstract_graph topology = {
 		.neighbor_map  = neighbor_map,
 		.num_neighbors = num_neighbors,
-		.num_nodes     = nsites,
+		.num_nodes     = nsites_physical + nsites_branching,
 	};
 	// vertex groups, corresponding to virtual bonds and indexed by site indices (i, j) with i < j
-	int v03_0_eids_0[] = { 0, 2, 3 };  int v03_0_eids_1[] = { 1, 2 };
-	int v03_1_eids_0[] = { 1 };        int v03_1_eids_1[] = { 0 };
-	struct ttno_graph_vertex vl03[] = {
-		{ .eids = { v03_0_eids_0, v03_0_eids_1 }, .num_edges = { ARRLEN(v03_0_eids_0), ARRLEN(v03_0_eids_1) }, .qnum = -1 },
-		{ .eids = { v03_1_eids_0, v03_1_eids_1 }, .num_edges = { ARRLEN(v03_1_eids_0), ARRLEN(v03_1_eids_1) }, .qnum =  0 },
-	};
-	int v04_0_eids_0[] = { 1, 2 };  int v04_0_eids_1[] = { 0 };
-	int v04_1_eids_0[] = { 0, 3 };  int v04_1_eids_1[] = { 1 };
-	struct ttno_graph_vertex vl04[] = {
-		{ .eids = { v04_0_eids_0, v04_0_eids_1 }, .num_edges = { ARRLEN(v04_0_eids_0), ARRLEN(v04_0_eids_1) }, .qnum = 1 },
-		{ .eids = { v04_1_eids_0, v04_1_eids_1 }, .num_edges = { ARRLEN(v04_1_eids_0), ARRLEN(v04_1_eids_1) }, .qnum = 0 },
-	};
-	int v06_0_eids_0[] = { 0, 2 };  int v06_0_eids_1[] = { 1, 3 };
-	int v06_1_eids_0[] = { 3 };     int v06_1_eids_1[] = { 0 };
-	int v06_2_eids_0[] = { 1 };     int v06_2_eids_1[] = { 2 };
+	int v06_0_eids_0[] = { 1, 3 };  int v06_0_eids_1[] = { 2, 3 };
+	int v06_1_eids_0[] = { 0 };     int v06_1_eids_1[] = { 1 };
+	int v06_2_eids_0[] = { 2 };     int v06_2_eids_1[] = { 0 };
 	struct ttno_graph_vertex vl06[] = {
 		{ .eids = { v06_0_eids_0, v06_0_eids_1 }, .num_edges = { ARRLEN(v06_0_eids_0), ARRLEN(v06_0_eids_1) }, .qnum =  0 },
-		{ .eids = { v06_1_eids_0, v06_1_eids_1 }, .num_edges = { ARRLEN(v06_1_eids_0), ARRLEN(v06_1_eids_1) }, .qnum = -1 },
-		{ .eids = { v06_2_eids_0, v06_2_eids_1 }, .num_edges = { ARRLEN(v06_2_eids_0), ARRLEN(v06_2_eids_1) }, .qnum =  1 },
+		{ .eids = { v06_1_eids_0, v06_1_eids_1 }, .num_edges = { ARRLEN(v06_1_eids_0), ARRLEN(v06_1_eids_1) }, .qnum =  1 },
+		{ .eids = { v06_2_eids_0, v06_2_eids_1 }, .num_edges = { ARRLEN(v06_2_eids_0), ARRLEN(v06_2_eids_1) }, .qnum = -1 },
+	};
+	int v15_0_eids_0[] = { 0, 2 };  int v15_0_eids_1[] = { 0 };
+	int v15_1_eids_0[] = { 1 };     int v15_1_eids_1[] = { 1, 2 };
+	struct ttno_graph_vertex vl15[] = {
+		{ .eids = { v15_0_eids_0, v15_0_eids_1 }, .num_edges = { ARRLEN(v15_0_eids_0), ARRLEN(v15_0_eids_1) }, .qnum = 1 },
+		{ .eids = { v15_1_eids_0, v15_1_eids_1 }, .num_edges = { ARRLEN(v15_1_eids_0), ARRLEN(v15_1_eids_1) }, .qnum = 0 },
 	};
-	int v13_0_eids_0[] = { 0, 2 };  int v13_0_eids_1[] = { 0 };
-	int v13_1_eids_0[] = { 1 };     int v13_1_eids_1[] = { 1, 2 };
-	struct ttno_graph_vertex vl13[] = {
-		{ .eids = { v13_0_eids_0, v13_0_eids_1 }, .num_edges = { ARRLEN(v13_0_eids_0), ARRLEN(v13_0_eids_1) }, .qnum = 1 },
-		{ .eids = { v13_1_eids_0, v13_1_eids_1 }, .num_edges = { ARRLEN(v13_1_eids_0), ARRLEN(v13_1_eids_1) }, .qnum = 0 },
+	int v17_0_eids_0[] = { 0, 1, 2 };  int v17_0_eids_1[] = { 0, 1 };
+	struct ttno_graph_vertex vl17[] = {
+		{ .eids = { v17_0_eids_0, v17_0_eids_1 }, .num_edges = { ARRLEN(v17_0_eids_0), ARRLEN(v17_0_eids_1) }, .qnum = 0 },
 	};
-	int v23_0_eids_0[] = { 1 };  int v23_0_eids_1[] = { 0 };
-	int v23_1_eids_0[] = { 0 };  int v23_1_eids_1[] = { 1, 2 };
-	struct ttno_graph_vertex vl23[] = {
-		{ .eids = { v23_0_eids_0, v23_0_eids_1 }, .num_edges = { ARRLEN(v23_0_eids_0), ARRLEN(v23_0_eids_1) }, .qnum = -1 },
-		{ .eids = { v23_1_eids_0, v23_1_eids_1 }, .num_edges = { ARRLEN(v23_1_eids_0), ARRLEN(v23_1_eids_1) }, .qnum =  0 },
+	int v25_0_eids_0[] = { 1 };  int v25_0_eids_1[] = { 0 };
+	int v25_1_eids_0[] = { 0 };  int v25_1_eids_1[] = { 1, 2 };
+	struct ttno_graph_vertex vl25[] = {
+		{ .eids = { v25_0_eids_0, v25_0_eids_1 }, .num_edges = { ARRLEN(v25_0_eids_0), ARRLEN(v25_0_eids_1) }, .qnum = -1 },
+		{ .eids = { v25_1_eids_0, v25_1_eids_1 }, .num_edges = { ARRLEN(v25_1_eids_0), ARRLEN(v25_1_eids_1) }, .qnum =  0 },
 	};
-	int v35_0_eids_0[] = { 0, 1, 2 };  int v35_0_eids_1[] = { 0 };
+	int v35_0_eids_0[] = { 0 };  int v35_0_eids_1[] = { 0, 1, 2 };
 	struct ttno_graph_vertex vl35[] = {
 		{ .eids = { v35_0_eids_0, v35_0_eids_1 }, .num_edges = { ARRLEN(v35_0_eids_0), ARRLEN(v35_0_eids_1) }, .qnum = 0 },
 	};
-	struct ttno_graph_vertex* vertices[7 * 7] = {
-		// 0    1     2     3     4     5     6
-		NULL, NULL, NULL, vl03, vl04, NULL, vl06,  // 0
-		NULL, NULL, NULL, vl13, NULL, NULL, NULL,  // 1
-		NULL, NULL, NULL, vl23, NULL, NULL, NULL,  // 2
-		NULL, NULL, NULL, NULL, NULL, vl35, NULL,  // 3
-		NULL, NULL, NULL, NULL, NULL, NULL, NULL,  // 4
-		NULL, NULL, NULL, NULL, NULL, NULL, NULL,  // 5
-		NULL, NULL, NULL, NULL, NULL, NULL, NULL,  // 6
+	int v46_0_eids_0[] = { 0 };  int v46_0_eids_1[] = { 1, 2 };
+	int v46_1_eids_0[] = { 1 };  int v46_1_eids_1[] = { 0, 3 };
+	struct ttno_graph_vertex vl46[] = {
+		{ .eids = { v46_0_eids_0, v46_0_eids_1 }, .num_edges = { ARRLEN(v46_0_eids_0), ARRLEN(v46_0_eids_1) }, .qnum = -1 },
+		{ .eids = { v46_1_eids_0, v46_1_eids_1 }, .num_edges = { ARRLEN(v46_1_eids_0), ARRLEN(v46_1_eids_1) }, .qnum =  0 },
 	};
-	int num_vertices[7 * 7] = {
-		// 0 1  2  3             4             5  6
-		   0, 0, 0, ARRLEN(vl03), ARRLEN(vl04), 0, ARRLEN(vl06),  // 0
-		   0, 0, 0, ARRLEN(vl13), 0,            0, 0,             // 1
-		   0, 0, 0, ARRLEN(vl23), 0,            0, 0,             // 2
-		   0, 0, 0, 0,            0, ARRLEN(vl35), 0,             // 3
-		   0, 0, 0, 0,            0,            0, 0,             // 4
-		   0, 0, 0, 0,            0,            0, 0,             // 5
-		   0, 0, 0, 0,            0,            0, 0,             // 6
+	int v56_0_eids_0[] = { 1, 2 };  int v56_0_eids_1[] = { 0, 2 };
+	int v56_1_eids_0[] = { 0 };     int v56_1_eids_1[] = { 1, 3 };
+	struct ttno_graph_vertex vl56[] = {
+		{ .eids = { v56_0_eids_0, v56_0_eids_1 }, .num_edges = { ARRLEN(v56_0_eids_0), ARRLEN(v56_0_eids_1) }, .qnum = 1 },
+		{ .eids = { v56_1_eids_0, v56_1_eids_1 }, .num_edges = { ARRLEN(v56_1_eids_0), ARRLEN(v56_1_eids_1) }, .qnum = 0 },
+	};
+	struct ttno_graph_vertex* vertices[8 * 8] = {
+		// 0    1     2     3     4     5     6     7
+		NULL, NULL, NULL, NULL, NULL, NULL, vl06, NULL,  // 0
+		NULL, NULL, NULL, NULL, NULL, vl15, NULL, vl17,  // 1
+		NULL, NULL, NULL, NULL, NULL, vl25, NULL, NULL,  // 2
+		NULL, NULL, NULL, NULL, NULL, vl35, NULL, NULL,  // 3
+		NULL, NULL, NULL, NULL, NULL, NULL, vl46, NULL,  // 4
+		NULL, NULL, NULL, NULL, NULL, NULL, vl56, NULL,  // 5
+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,  // 6
+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,  // 7
+	};
+	int num_vertices[8 * 8] = {
+		// 0 1  2  3  4  5             6             7
+		   0, 0, 0, 0, 0, 0,            ARRLEN(vl06), 0,             // 0
+		   0, 0, 0, 0, 0, ARRLEN(vl15), 0,            ARRLEN(vl17),  // 1
+		   0, 0, 0, 0, 0, ARRLEN(vl25), 0,            0,             // 2
+		   0, 0, 0, 0, 0, ARRLEN(vl35), 0,            0,             // 3
+		   0, 0, 0, 0, 0, 0,            ARRLEN(vl46), 0,             // 4
+		   0, 0, 0, 0, 0, 0,            ARRLEN(vl56), 0,             // 5
+		   0, 0, 0, 0, 0, 0,            0,            0,             // 6
+		   0, 0, 0, 0, 0, 0,            0,            0,             // 7
 	};
 	// edges
-	struct local_op_ref e0_0_opics[] = { { .oid = 0, .cid = 11 }, };
-	struct local_op_ref e0_1_opics[] = { { .oid = 4, .cid =  7 }, { .oid = 1, .cid = 10 }, };
-	struct local_op_ref e0_2_opics[] = { { .oid = 5, .cid =  9 }, { .oid = 6, .cid = 12 }, };
-	struct local_op_ref e0_3_opics[] = { { .oid = 7, .cid =  3 }, };
-	int e0_0_vids[] = { 0, 1, 0 };
-	int e0_1_vids[] = { 1, 0, 2 };
-	int e0_2_vids[] = { 0, 0, 0 };
-	int e0_3_vids[] = { 0, 1, 1 };
+	struct local_op_ref e0_0_opics[] = { { .oid =  4, .cid = 15 }, };
+	struct local_op_ref e0_1_opics[] = { { .oid = 22, .cid = 17 }, };
+	struct local_op_ref e0_2_opics[] = { { .oid =  7, .cid =  5 }, { .oid = 17, .cid = 16 }, };
+	struct local_op_ref e0_3_opics[] = { { .oid = 24, .cid = 19 }, };
+	int e0_0_vids[] = { 1 };
+	int e0_1_vids[] = { 0 };
+	int e0_2_vids[] = { 2 };
+	int e0_3_vids[] = { 0 };
 	struct ttno_graph_hyperedge el0[] = {
 		{ .vids = e0_0_vids, .order = ARRLEN(e0_0_vids), .opics = e0_0_opics, .nopics = ARRLEN(e0_0_opics) },
 		{ .vids = e0_1_vids, .order = ARRLEN(e0_1_vids), .opics = e0_1_opics, .nopics = ARRLEN(e0_1_opics) },
@@ -128,9 +136,9 @@ char* test_ttno_from_assembly()
 	struct local_op_ref e1_0_opics[] = { { .oid =  8, .cid =  9 }, };
 	struct local_op_ref e1_1_opics[] = { { .oid =  9, .cid =  2 }, };
 	struct local_op_ref e1_2_opics[] = { { .oid = 10, .cid = 14 }, { .oid = 11, .cid = 4 }, };
-	int e1_0_vids[] = { 0 };
-	int e1_1_vids[] = { 1 };
-	int e1_2_vids[] = { 0 };
+	int e1_0_vids[] = { 0, 0 };
+	int e1_1_vids[] = { 1, 0 };
+	int e1_2_vids[] = { 0, 0 };
 	struct ttno_graph_hyperedge el1[] = {
 		{ .vids = e1_0_vids, .order = ARRLEN(e1_0_vids), .opics = e1_0_opics, .nopics = ARRLEN(e1_0_opics) },
 		{ .vids = e1_1_vids, .order = ARRLEN(e1_1_vids), .opics = e1_1_opics, .nopics = ARRLEN(e1_1_opics) },
@@ -144,16 +152,10 @@ char* test_ttno_from_assembly()
 		{ .vids = e2_0_vids, .order = ARRLEN(e2_0_vids), .opics = e2_0_opics, .nopics = ARRLEN(e2_0_opics) },
 		{ .vids = e2_1_vids, .order = ARRLEN(e2_1_vids), .opics = e2_1_opics, .nopics = ARRLEN(e2_1_opics) },
 	};
-	struct local_op_ref e3_0_opics[] = { { .oid = 14, .cid = 15 }, };
-	struct local_op_ref e3_1_opics[] = { { .oid = 10, .cid = 13 }, };
-	struct local_op_ref e3_2_opics[] = { { .oid = 15, .cid =  3 }, { .oid = 16, .cid = 20 }, };
-	int e3_0_vids[] = { 1, 0, 0, 0 };
-	int e3_1_vids[] = { 0, 1, 1, 0 };
-	int e3_2_vids[] = { 0, 1, 1, 0 };
+	struct local_op_ref e3_0_opics[] = { { .oid = 18, .cid = 1 }, { .oid = 19, .cid = 18 }, { .oid = 20, .cid = 11 }, };
+	int e3_0_vids[] = { 0 };
 	struct ttno_graph_hyperedge el3[] = {
 		{ .vids = e3_0_vids, .order = ARRLEN(e3_0_vids), .opics = e3_0_opics, .nopics = ARRLEN(e3_0_opics) },
-		{ .vids = e3_1_vids, .order = ARRLEN(e3_1_vids), .opics = e3_1_opics, .nopics = ARRLEN(e3_1_opics) },
-		{ .vids = e3_2_vids, .order = ARRLEN(e3_2_vids), .opics = e3_2_opics, .nopics = ARRLEN(e3_2_opics) },
 	};
 	struct local_op_ref e4_0_opics[] = { { .oid = 17, .cid =  8 }, };
 	struct local_op_ref e4_1_opics[] = { { .oid = 14, .cid = 11 }, };
@@ -163,40 +165,55 @@ char* test_ttno_from_assembly()
 		{ .vids = e4_0_vids, .order = ARRLEN(e4_0_vids), .opics = e4_0_opics, .nopics = ARRLEN(e4_0_opics) },
 		{ .vids = e4_1_vids, .order = ARRLEN(e4_1_vids), .opics = e4_1_opics, .nopics = ARRLEN(e4_1_opics) },
 	};
-	struct local_op_ref e5_0_opics[] = { { .oid = 18, .cid = 1 }, { .oid = 19, .cid = 18 }, { .oid = 20, .cid = 11 }, };
-	int e5_0_vids[] = { 0 };
+	struct local_op_ref e5_0_opics[] = { { .oid = 14, .cid = 15 }, };
+	struct local_op_ref e5_1_opics[] = { { .oid = 10, .cid = 13 }, };
+	struct local_op_ref e5_2_opics[] = { { .oid = 15, .cid =  3 }, { .oid = 16, .cid = 20 }, };
+	int e5_0_vids[] = { 0, 0, 0, 1 };
+	int e5_1_vids[] = { 1, 1, 0, 0 };
+	int e5_2_vids[] = { 1, 1, 0, 0 };
 	struct ttno_graph_hyperedge el5[] = {
 		{ .vids = e5_0_vids, .order = ARRLEN(e5_0_vids), .opics = e5_0_opics, .nopics = ARRLEN(e5_0_opics) },
+		{ .vids = e5_1_vids, .order = ARRLEN(e5_1_vids), .opics = e5_1_opics, .nopics = ARRLEN(e5_1_opics) },
+		{ .vids = e5_2_vids, .order = ARRLEN(e5_2_vids), .opics = e5_2_opics, .nopics = ARRLEN(e5_2_opics) },
 	};
-	struct local_op_ref e6_0_opics[] = { { .oid = 21, .cid = 15 }, };
-	struct local_op_ref e6_1_opics[] = { { .oid = 22, .cid = 17 }, };
-	struct local_op_ref e6_2_opics[] = { { .oid = 23, .cid =  5 }, { .oid = 17, .cid = 16 }, };
-	struct local_op_ref e6_3_opics[] = { { .oid = 24, .cid = 19 }, };
-	int e6_0_vids[] = { 1 };
-	int e6_1_vids[] = { 0 };
-	int e6_2_vids[] = { 2 };
-	int e6_3_vids[] = { 0 };
+	struct local_op_ref e6_0_opics[] = { { .oid = OID_NOP, .cid = 11 }, };
+	struct local_op_ref e6_1_opics[] = { { .oid = OID_NOP, .cid =  7 }, { .oid = OID_NOP, .cid = 10 }, };
+	struct local_op_ref e6_2_opics[] = { { .oid = OID_NOP, .cid =  9 }, { .oid = OID_NOP, .cid = 12 }, };
+	struct local_op_ref e6_3_opics[] = { { .oid = OID_NOP, .cid =  3 }, };
+	int e6_0_vids[] = { 2, 1, 0 };
+	int e6_1_vids[] = { 1, 0, 1 };
+	int e6_2_vids[] = { 0, 0, 0 };
+	int e6_3_vids[] = { 0, 1, 1 };
 	struct ttno_graph_hyperedge el6[] = {
 		{ .vids = e6_0_vids, .order = ARRLEN(e6_0_vids), .opics = e6_0_opics, .nopics = ARRLEN(e6_0_opics) },
 		{ .vids = e6_1_vids, .order = ARRLEN(e6_1_vids), .opics = e6_1_opics, .nopics = ARRLEN(e6_1_opics) },
 		{ .vids = e6_2_vids, .order = ARRLEN(e6_2_vids), .opics = e6_2_opics, .nopics = ARRLEN(e6_2_opics) },
 		{ .vids = e6_3_vids, .order = ARRLEN(e6_3_vids), .opics = e6_3_opics, .nopics = ARRLEN(e6_3_opics) },
 	};
-	struct ttno_graph_hyperedge* edges[7] = {
-		el0, el1, el2, el3, el4, el5, el6,
+	struct local_op_ref e7_0_opics[] = { { .oid = OID_NOP, .cid = 21 }, };
+	struct local_op_ref e7_1_opics[] = { { .oid = OID_NOP, .cid =  7 }, };
+	int e7_0_vids[] = { 0 };
+	int e7_1_vids[] = { 0 };
+	struct ttno_graph_hyperedge el7[] = {
+		{ .vids = e7_0_vids, .order = ARRLEN(e7_0_vids), .opics = e7_0_opics, .nopics = ARRLEN(e7_0_opics) },
+		{ .vids = e7_1_vids, .order = ARRLEN(e7_1_vids), .opics = e7_1_opics, .nopics = ARRLEN(e7_1_opics) },
+	};
+	struct ttno_graph_hyperedge* edges[8] = {
+		el0, el1, el2, el3, el4, el5, el6, el7,
 	};
-	int num_edges[7] = {
-		ARRLEN(el0), ARRLEN(el1), ARRLEN(el2), ARRLEN(el3), ARRLEN(el4), ARRLEN(el5), ARRLEN(el6),
+	int num_edges[8] = {
+		ARRLEN(el0), ARRLEN(el1), ARRLEN(el2), ARRLEN(el3), ARRLEN(el4), ARRLEN(el5), ARRLEN(el6), ARRLEN(el7),
 	};
 	// construct the TTNO graph
 	struct ttno_graph graph = {
-		.topology  = topology,
-		.verts     = vertices,
-		.edges     = edges,
-		.num_verts = num_vertices,
-		.num_edges = num_edges,
-		.nsites    = nsites,
+		.topology         = topology,
+		.verts            = vertices,
+		.edges            = edges,
+		.num_verts        = num_vertices,
+		.num_edges        = num_edges,
+		.nsites_physical  = nsites_physical,
+		.nsites_branching = nsites_branching,
 	};
 	if (!ttno_graph_is_consistent(&graph)) {
 		return "internal TTNO graph construction is inconsistent";
@@ -223,7 +240,7 @@ char* test_ttno_from_assembly()
 	delete_dense_tensor(&opmap_tensor);
 	// coefficient map; first two entries must always be 0 and 1
-	const double coeffmap[] = { 0, 1, -0.7, -0.1, 0.8, -0.2, -1.3, -1.2, -0.3, 0.6, 0.4, 0.7, -1.1, 1.8, 0.2, 0.5, 0.9, 0.3, -0.4, -1.4, -0.9, };
+	const double coeffmap[] = { 0, 1, -0.7, -0.1, 0.8, -0.2, -1.3, -1.2, -0.3, 0.6, 0.4, 0.7, -1.1, 1.8, 0.2, 0.5, 0.9, 0.3, -0.4, -1.4, -0.9, 1.5 };
 	struct ttno_assembly assembly = {
 		.graph = graph,
diff --git a/test/operator/test_ttno.py b/test/operator/test_ttno.py
index 0235e1a..e263969 100644
--- a/test/operator/test_ttno.py
+++ b/test/operator/test_ttno.py
@@ -19,76 +19,89 @@ def ttno_from_assembly_data():
     # physical quantum numbers
     qd = np.array([0, -1, 1])
-    # number of lattice sites
-    nsites = 7
+    # number of physical lattice sites
+    nsites_physical = 6
     # tree topology:
     #
-    #     4     6
+    #     4     0
     #      \   /
     #       \ /
-    #        0
+    #        6
     #        |
     #        |
-    #  2 --- 3 --- 1
+    #  2 --- 5 --- 1 --- 7
     #        |
     #        |
-    #        5
+    #        3
     #
     # vertex quantum numbers
     vert_qnums = [
-        -1,  0,     # (0, 3)
         1,  0,     # (0, 4)
-         0, -1, 1,  # (0, 6)
-         1,  0,     # (1, 3)
-        -1,  0,     # (2, 3)
+         0,  1, -1, # (0, 6)
+         1,  0,     # (1, 5)
+         0,         # (1, 7)
+        -1,  0,     # (2, 5)
          0,         # (3, 5)
+        -1,  0,     # (4, 6)
+         1,  0,     # (5, 6)
     ]
+    # offsets
+    vos06 = 0
+    vos15 = vos06 + 3
+    vos17 = vos15 + 2
+    vos25 = vos17 + 1
+    vos35 = vos25 + 2
+    vos46 = vos35 + 1
+    vos56 = vos46 + 2
     edge_info = [
         [   # site 0
-            OpHyperedgeInfo(( 0, 2 + 1, 4 + 0 ), ( 0,    )),
-            OpHyperedgeInfo(( 1, 2 + 0, 4 + 2 ), ( 4,  1 )),
-            OpHyperedgeInfo(( 0, 2 + 0, 4 + 0 ), ( 5,  6 )),
-            OpHyperedgeInfo(( 0, 2 + 1, 4 + 1 ), ( 7,    ))],
+            OpHyperedgeInfo((vos06 + 1, ), ( 4,     )),
+            OpHyperedgeInfo((vos06 + 0, ), (22,     )),
+            OpHyperedgeInfo((vos06 + 2, ), ( 7,  17 )),
+            OpHyperedgeInfo((vos06 + 0, ), (24,     ))],
         [   # site 1
-            OpHyperedgeInfo(( 7 + 0, ), ( 8,    )),
-            OpHyperedgeInfo(( 7 + 1, ), ( 9,    )),
-            OpHyperedgeInfo(( 7 + 0, ), (10, 11 ))],
+            OpHyperedgeInfo((vos15 + 0, vos17 + 0, ), ( 8,    )),
+            OpHyperedgeInfo((vos15 + 1, vos17 + 0, ), ( 9,    )),
+            OpHyperedgeInfo((vos15 + 0, vos17 + 0, ), (10, 11 ))],
        [   # site 2
-            OpHyperedgeInfo(( 9 + 1, ), (12, )),
-            OpHyperedgeInfo(( 9 + 0, ), (13, ))],
+            OpHyperedgeInfo((vos25 + 1, ), (12, )),
+            OpHyperedgeInfo((vos25 + 0, ), (13, ))],
         [   # site 3
-            OpHyperedgeInfo(( 1, 7 + 0, 9 + 0, 11 + 0), (14, )),
-            OpHyperedgeInfo(( 0, 7 + 1, 9 + 1, 11 + 0), (10, )),
-            OpHyperedgeInfo(( 0, 7 + 1, 9 + 1, 11 + 0), (15, 16 ))],
+            OpHyperedgeInfo((vos35 + 0, ), (18, 19, 20))],
         [   # site 4
-            OpHyperedgeInfo(( 2 + 0, ), (17, )),
-            OpHyperedgeInfo(( 2 + 1, ), (14, ))],
+            OpHyperedgeInfo((vos46 + 0, ), (17, )),
+            OpHyperedgeInfo((vos46 + 1, ), (14, ))],
         [   # site 5
-            OpHyperedgeInfo((11 + 0, ), (18, 19, 20))],
+            OpHyperedgeInfo((vos15 + 0, vos25 + 0, vos35 + 0, vos56 + 1), (14, )),
+            OpHyperedgeInfo((vos15 + 1, vos25 + 1, vos35 + 0, vos56 + 0), (10, )),
+            OpHyperedgeInfo((vos15 + 1, vos25 + 1, vos35 + 0, vos56 + 0), (15, 16 ))],
         [   # site 6
-            OpHyperedgeInfo(( 4 + 1, ), (21,    )),
-            OpHyperedgeInfo(( 4 + 0, ), (22,    )),
-            OpHyperedgeInfo(( 4 + 2, ), (23, 17 )),
-            OpHyperedgeInfo(( 4 + 0, ), (24,    ))],
+            OpHyperedgeInfo((vos06 + 2, vos46 + 1, vos56 + 0 ), (-1,    )),
+            OpHyperedgeInfo((vos06 + 1, vos46 + 0, vos56 + 1 ), (-1, -1 )),
+            OpHyperedgeInfo((vos06 + 0, vos46 + 0, vos56 + 0 ), (-1, -1 )),
+            OpHyperedgeInfo((vos06 + 0, vos46 + 1, vos56 + 1 ), (-1,    ))],
+        [   # site 7
+            OpHyperedgeInfo((vos17 + 0, ), (-1, )),
+            OpHyperedgeInfo((vos17 + 0, ), (-1, ))],
     ]
-    # sign factors of virtual bond quantum numbers
+    # tensor axes directions, equal to sign factors of virtual bond quantum numbers
     qbond_signs = [
-        [-1, -1, -1    ],  # attached to site 0
-        [-1,           ],  # attached to site 1
+        [-1            ],  # attached to site 0
+        [-1, -1        ],  # attached to site 1
         [-1            ],  # attached to site 2
-        [ 1,  1,  1, -1],  # attached to site 3
-        [ 1            ],  # attached to site 4
-        [ 1            ],  # attached to site 5
-        [ 1            ],  # attached to site 6
+        [-1            ],  # attached to site 3
+        [-1            ],  # attached to site 4
+        [ 1,  1,  1, -1],  # attached to site 5
+        [ 1,  1,  1    ],  # attached to site 6
+        [ 1            ],  # attached to site 7
     ]
     # random local operators
     opmap = [rng.standard_normal(2 * (len(qd),)) for oid in range(25)]
     # enforce sparsity pattern according to quantum numbers
-    for k in range(nsites):
+    for k in range(nsites_physical):
         for ei in edge_info[k]:
             qsum = np.dot(qbond_signs[k], [vert_qnums[vid] for vid in ei.vids])
             mask = ptn.qnumber_outer_sum([qd, -qd, [qsum]])[:, :, 0]
diff --git a/test/operator/test_ttno_graph.c b/test/operator/test_ttno_graph.c
index de6aad4..c581ef5 100644
--- a/test/operator/test_ttno_graph.c
+++ b/test/operator/test_ttno_graph.c
@@ -13,33 +13,35 @@ char* test_ttno_graph_from_opchains()
 		return "'H5Fopen' in test_ttno_graph_from_opchains failed";
 	}
-	// number of lattice sites
-	const int nsites = 8;
+	// number of physical and branching lattice sites
+	const int nsites_physical = 5;
+	const int nsites_branching = 3;
+	const int nsites = nsites_physical + nsites_branching;
 	// local physical dimension
 	const long d = 3;
-	const long dim_full = ipow(d, nsites);
+	const long dim_full_physical = ipow(d, nsites_physical);
 	// tree topology:
 	//
-	//     4     6
+	//     4     0
 	//      \   /
 	//       \ /
-	//        0
+	//        6
 	//        |
 	//        |
-	//  2 --- 3 --- 1 --- 7
+	//  2 --- 5 --- 1 --- 7
 	//        |
 	//        |
-	//        5
+	//        3
 	//
-	int neigh0[] = { 3, 4, 6 };
-	int neigh1[] = { 3, 7 };
-	int neigh2[] = { 3 };
-	int neigh3[] = { 0, 1, 2, 5 };
-	int neigh4[] = { 0 };
-	int neigh5[] = { 3 };
-	int neigh6[] = { 0 };
+	int neigh0[] = { 6 };
+	int neigh1[] = { 5, 7 };
+	int neigh2[] = { 5 };
+	int neigh3[] = { 5 };
+	int neigh4[] = { 6 };
+	int neigh5[] = { 1, 2, 3, 6 };
+	int neigh6[] = { 0, 4, 5 };
 	int neigh7[] = { 1 };
 	int* neighbor_map[8] = {
 		neigh0, neigh1, neigh2, neigh3, neigh4, neigh5, neigh6, neigh7,
@@ -85,7 +87,7 @@ char* test_ttno_graph_from_opchains()
 	}
 	struct ttno_graph graph;
-	if (ttno_graph_from_opchains(chains, nchains, &topology, &graph) < 0) {
+	if (ttno_graph_from_opchains(chains, nchains, nsites_physical, &topology, &graph) < 0) {
 		return "'ttno_graph_from_opchains' failed internally";
 	}
 	if (!ttno_graph_is_consistent(&graph)) {
@@ -93,33 +95,33 @@ char* test_ttno_graph_from_opchains()
 	}
 	// bond dimensions
-	int rank_046_12357;
-	if (read_hdf5_attribute(file, "rank_046_12357", H5T_NATIVE_INT, &rank_046_12357) < 0) {
+	int rank_04_123;
+	if (read_hdf5_attribute(file, "rank_04_123", H5T_NATIVE_INT, &rank_04_123) < 0) {
 		return "reading operator partitioning rank from disk failed";
 	}
-	int rank_17_023456;
-	if (read_hdf5_attribute(file, "rank_17_023456", H5T_NATIVE_INT, &rank_17_023456) < 0) {
+	int rank_1_0234;
+	if (read_hdf5_attribute(file, "rank_1_0234", H5T_NATIVE_INT, &rank_1_0234) < 0) {
 		return "reading operator partitioning rank from disk failed";
 	}
-	int rank_2_0134567;
-	if (read_hdf5_attribute(file, "rank_2_0134567", H5T_NATIVE_INT, &rank_2_0134567) < 0) {
+	int rank_2_0134;
+	if (read_hdf5_attribute(file, "rank_2_0134", H5T_NATIVE_INT, &rank_2_0134) < 0) {
 		return "reading operator partitioning rank from disk failed";
 	}
-	int rank_6_0123457;
-	if (read_hdf5_attribute(file, "rank_6_0123457", H5T_NATIVE_INT, &rank_6_0123457) < 0) {
+	int rank_4_0123;
+	if (read_hdf5_attribute(file, "rank_4_0123", H5T_NATIVE_INT, &rank_4_0123) < 0) {
 		return "reading operator partitioning rank from disk failed";
 	}
-	if (graph.num_verts[0*nsites + 3] != rank_046_12357) {
+	if (graph.num_verts[5*nsites + 6] != rank_04_123) {
 		return "virtual bond dimension does not match expected partitioning matrix rank";
 	}
-	if (graph.num_verts[1*nsites + 3] != rank_17_023456) {
+	if (graph.num_verts[1*nsites + 5] != rank_1_0234) {
 		return "virtual bond dimension does not match expected partitioning matrix rank";
 	}
-	if (graph.num_verts[2*nsites + 3] != rank_2_0134567) {
+	if (graph.num_verts[2*nsites + 5] != rank_2_0134) {
 		return "virtual bond dimension does not match expected partitioning matrix rank";
 	}
-	if (graph.num_verts[0*nsites + 6] != rank_6_0123457) {
+	if (graph.num_verts[4*nsites + 6] != rank_4_0123) {
 		return "virtual bond dimension does not match expected partitioning matrix rank";
 	}
@@ -155,12 +157,12 @@ char* test_ttno_graph_from_opchains()
 	// sum matrix representations of individual operator chains, as reference
 	struct dense_tensor mat_ref;
-	const long dim_mat_ref[2] = { dim_full, dim_full };
+	const long dim_mat_ref[2] = { dim_full_physical, dim_full_physical };
 	allocate_dense_tensor(CT_DOUBLE_COMPLEX, 2, dim_mat_ref, &mat_ref);
 	for (int i = 0; i < nchains; i++)
 	{
 		struct dense_tensor c;
-		op_chain_to_matrix(&chains[i], d, nsites, opmap, coeffmap, CT_DOUBLE_COMPLEX, &c);
+		op_chain_to_matrix(&chains[i], d, nsites_physical, opmap, coeffmap, CT_DOUBLE_COMPLEX, &c);
 		dense_tensor_scalar_multiply_add(numeric_one(CT_DOUBLE_COMPLEX), &c, &mat_ref);
 		delete_dense_tensor(&c);
 	}
diff --git a/test/operator/test_ttno_graph.py b/test/operator/test_ttno_graph.py
index 0e19187..ceec23c 100644
--- a/test/operator/test_ttno_graph.py
+++ b/test/operator/test_ttno_graph.py
@@ -15,8 +15,8 @@ def ttno_graph_from_opchains_data():
     qd = np.array([0, -1, 1])
     # local physical dimension
     d = len(qd)
-    # number of sites
-    nsites = 8
+    # number of physical sites
+    nsites_physical = 5
     # identity operator ID
     oid_identity = 0
@@ -26,21 +26,21 @@ def ttno_graph_from_opchains_data():
     coeffmap = np.concatenate((np.array([0., 1.]), ptn.crandn(7, rng)))
     cids = [5, 8, 7, 4, 3, 2, 3, 6, 4]
-    chains = [ #  0   1   2   3   4   5   6   7      0   1   2   3   4   5   6   7
-        ptn.OpChain([     6,  9,  5, 15        ], [ 0, -1, -1,  1,  0        ], coeffmap[cids[0]], 2),
-        ptn.OpChain([ 8, 16,  2, 11,  3        ], [ 0, -1,  1,  2,  1,  0    ], coeffmap[cids[1]], 0),
-        ptn.OpChain([        10,  1,  9        ], [ 0,  1,  0,  0            ], coeffmap[cids[2]], 2),
-        ptn.OpChain([             9,  7, 13, 12,  1], [ 0,  0, -1, -1,  1,  0], coeffmap[cids[3]], 3),
-        ptn.OpChain([     0, 17,  4,  7        ], [ 0,  0,  1,  1,  0        ], coeffmap[cids[4]], 1),
-        ptn.OpChain([15, 14                    ], [ 0, -1,  0                ], coeffmap[cids[5]], 0),
-        ptn.OpChain([     9,  2,  1            ], [ 0,  0,  1,  0            ], coeffmap[cids[6]], 1),
-        ptn.OpChain([ 7,  0,  0,  0, 14,  0,  9], [ 0, -1, -1, -1, -1,  0,  0,  0], coeffmap[cids[7]], 0),
-        ptn.OpChain([                10,  0,  7], [ 0,  1,  1,  0            ], coeffmap[cids[8]], 4),
+    chains = [ #  0   1   2   3   4      0   1   2   3   4
+        ptn.OpChain([     6,  9,  5, 15], [ 0, -1, -1,  1,  0], coeffmap[cids[0]], 1),
+        ptn.OpChain([ 8, 16,  2, 11,  3], [ 0, -1,  1,  2,  1,  0], coeffmap[cids[1]], 0),
+        ptn.OpChain([        10,  1    ], [ 0,  1,  0    ], coeffmap[cids[2]], 2),
+        ptn.OpChain([ 9,  7, 13, 12,  1], [ 0,  0, -1, -1,  1,  0], coeffmap[cids[3]], 0),
+        ptn.OpChain([     0, 17,  4,  7], [ 0,  0,  1,  1,  0], coeffmap[cids[4]], 1),
+        ptn.OpChain([            15, 14], [ 0, -1,  0], coeffmap[cids[5]], 3),
+        ptn.OpChain([     9,  2,  1    ], [ 0,  0,  1,  0    ], coeffmap[cids[6]], 1),
+        ptn.OpChain([ 7,  0,  0, 14,  9], [ 0, -1, -1, -1,  0,  0], coeffmap[cids[7]], 0),
+        ptn.OpChain([        10,  0,  7], [ 0,  1,  1,  0], coeffmap[cids[8]], 2),
     ]
-    graph = ptn.OpGraph.from_opchains(chains, nsites, oid_identity)
+    graph = ptn.OpGraph.from_opchains(chains, nsites_physical, oid_identity)
     assert graph.is_consistent()
-    assert graph.length == nsites
+    assert graph.length == nsites_physical
     # random local operators
     opmap = [np.identity(len(qd), dtype=complex) if opid == oid_identity else ptn.crandn((d, d), rng)
@@ -62,16 +62,16 @@ def ttno_graph_from_opchains_data():
         mat_ref = mat_ref + np.kron(np.kron(
             np.identity(len(qd)**chain.istart),
             chain.as_matrix(opmap)),
-            np.identity(len(qd)**(nsites - (chain.istart + chain.length))))
+            np.identity(len(qd)**(nsites_physical - (chain.istart + chain.length))))
-    # group sites (0, 4, 6) and (1, 2, 3, 5, 7)
-    rank_046_12357 = _operator_partition_rank(mat_ref, d, (0, 4, 6), (1, 2, 3, 5, 7))
-    # group sites (1, 7) and (0, 2, 3, 4, 5, 6)
-    rank_17_023456 = _operator_partition_rank(mat_ref, d, (1, 7), (0, 2, 3, 4, 5, 6))
-    # group sites (2) and (0, 1, 3, 4, 5, 6, 7)
-    rank_2_0134567 = _operator_partition_rank(mat_ref, d, (2,), (0, 1, 3, 4, 5, 6, 7))
-    # group sites (6) and (0, 1, 2, 3, 4, 5, 7)
-    rank_6_0123457 = _operator_partition_rank(mat_ref, d, (6,), (0, 1, 2, 3, 4, 5, 7))
+    # group sites (0, 4) and (1, 2, 3)
+    rank_04_123 = _operator_partition_rank(mat_ref, d, (0, 4), (1, 2, 3))
+    # group sites (1,) and (0, 2, 3, 4)
+    rank_1_0234 = _operator_partition_rank(mat_ref, d, (1,), (0, 2, 3, 4))
+    # group sites (2) and (0, 1, 3, 4)
+    rank_2_0134 = _operator_partition_rank(mat_ref, d, (2,), (0, 1, 3, 4))
+    # group sites (4) and (0, 1, 2, 3)
+    rank_4_0123 = _operator_partition_rank(mat_ref, d, (4,), (0, 1, 2, 3))
     with h5py.File("data/test_ttno_graph_from_opchains.hdf5", "w") as file:
         for i, chain in enumerate(chains):
@@ -82,13 +82,12 @@ def ttno_graph_from_opchains_data():
             file.attrs[f"/chain{i}/istart"] = chain.istart
         file["opmap"] = interleave_complex(np.array(opmap))
         file["coeffmap"] = interleave_complex(coeffmap)
-        file.attrs["rank_046_12357"] = rank_046_12357
-        file.attrs["rank_17_023456"] = rank_17_023456
-        # note: local operators opmap[2], opmap[10], opmap[17] acting on site 2
-        # span a two-dimensional subspace,
-        # hence numerically determined rank is one below general abstract case
-        file.attrs["rank_2_0134567"] = rank_2_0134567 + 1
-        file.attrs["rank_6_0123457"] = rank_6_0123457
+        file.attrs["rank_04_123"] = rank_04_123
+        file.attrs["rank_1_0234"] = rank_1_0234
+        # note: local operators acting on sites 2 and 4 span a 5-dimensional subspace,
+        # hence numerically determined rank is smaller than general abstract case
+        file.attrs["rank_2_0134"] = rank_2_0134 + 1
+        file.attrs["rank_4_0123"] = rank_4_0123 + 2
 def _operator_partition_rank(u, d: int, sites_a, sites_b):
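As a usage illustration (a minimal sketch, not part of the patch; it assumes the chains, topology, operator map and coefficient map are prepared as in test_ttno_graph.c above), the updated interface with an explicit physical-site count would be driven roughly as follows:

	/* usage sketch: build a TTNO graph on a tree with physical sites 0..nsites_physical-1
	   followed by branching sites (setup of 'chains', 'nchains', 'topology' assumed) */
	struct ttno_graph graph;
	if (ttno_graph_from_opchains(chains, nchains, nsites_physical, &topology, &graph) < 0) {
		/* hypothetical error handling, mirroring the test */
		return "'ttno_graph_from_opchains' failed internally";
	}
	assert(ttno_graph_is_consistent(&graph));
	/* ... assemble and use the TTNO ... */
	delete_ttno_graph(&graph);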