Skip to content

Commit

Permalink
Remove eval_tn_MPI_expectation
Browse files Browse the repository at this point in the history
  • Loading branch information
Tankya2 committed Jan 30, 2024
1 parent 66aaf0e commit fea2b11
Showing 1 changed file with 0 additions and 65 deletions.
65 changes: 0 additions & 65 deletions src/qibotn/cutn.py
Original file line number Diff line number Diff line change
Expand Up @@ -368,71 +368,6 @@ def eval_tn_MPI_2_expectation(qibo_circ, datatype, n_samples=8):

return result, rank


def eval_tn_MPI_expectation(qibo_circ, datatype, n_samples=8):
    """Evaluate a Pauli-string expectation value of a Qibo circuit with
    cuTensorNet, distributing contraction slices across MPI ranks/GPUs.

    Parameters
    ----------
    qibo_circ : qibo circuit to convert and contract.
    datatype : dtype string passed to the circuit-to-einsum converter.
    n_samples : int, number of pathfinder samples (default 8).

    Returns
    -------
    tuple: (result, rank) — this rank's contraction output and its MPI
    rank. NOTE(review): with ``distributed_reset_configuration`` active,
    cuTensorNet may handle the cross-rank reduction internally — confirm
    whether ``result`` is a partial or the full expectation value.
    """
    from mpi4py import MPI  # this line initializes MPI

    # Use half the logical CPUs for the pathfinder's thread pool.
    ncpu_threads = multiprocessing.cpu_count() // 2

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    # Round-robin assignment of ranks to the GPUs visible on this node.
    device_id = rank % getDeviceCount()
    cp.cuda.Device(device_id).use()

    handle = cutn.create()
    try:
        network_opts = cutn.NetworkOptions(handle=handle, blocking="auto")
        # Bind the library handle to this MPI communicator for
        # distributed contraction.
        cutn.distributed_reset_configuration(
            handle, *cutn.get_mpi_comm_pointer(comm)
        )

        # Circuit conversion: build the interleaved einsum operands for
        # the expectation of the Pauli string over all qubits.
        myconvertor = QiboCircuitToEinsum(qibo_circ, dtype=datatype)
        operands_interleave = myconvertor.expectation_operands(
            PauliStringGen(qibo_circ.nqubits)
        )

        # Pathfinder: search for the optimal contraction path. The path
        # and info are stored on the network object's attributes.
        network = cutn.Network(*operands_interleave, options=network_opts)
        path, opt_info = network.contract_path(
            optimize={
                "samples": n_samples,
                "threads": ncpu_threads,
                # Force enough slices that every rank gets work.
                "slicing": {"min_slices": max(16, size)},
            }
        )

        # Partition slices as evenly as possible: the first `extra`
        # ranks take one additional slice; the last rank absorbs any
        # remainder so all `num_slices` are covered exactly once.
        num_slices = opt_info.num_slices
        chunk, extra = num_slices // size, num_slices % size
        slice_begin = rank * chunk + min(rank, extra)
        slice_end = (
            num_slices
            if rank == size - 1
            else (rank + 1) * chunk + min(rank + 1, extra)
        )

        # Execution: contract only this rank's share of the slices.
        result = network.contract(slices=range(slice_begin, slice_end))
    finally:
        # Release the cuTensorNet handle even if conversion or
        # contraction raises (the original leaked it on error).
        cutn.destroy(handle)

    return result, rank

def eval_mps(qibo_circ, gate_algo, datatype):
myconvertor = QiboCircuitToMPS(qibo_circ, gate_algo, dtype=datatype)
mps_helper = MPSContractionHelper(myconvertor.num_qubits)
Expand Down

0 comments on commit fea2b11

Please sign in to comment.