Remove eval_tn_mpi
Tankya2 committed Jan 30, 2024
1 parent 293af81 commit 1035629
Showing 1 changed file with 0 additions and 75 deletions.
75 changes: 0 additions & 75 deletions src/qibotn/cutn.py
@@ -433,80 +433,6 @@ def eval_tn_MPI_expectation(qibo_circ, datatype, n_samples=8):

    return result, rank


def eval_tn_MPI(qibo_circ, datatype, n_samples=8):
    """Convert a qibo circuit to tensor network (TN) format and contract it using multiple nodes and multiple GPUs through MPI.

    The conversion is performed by QiboCircuitToEinsum(), after which contraction proceeds in two steps: pathfinding and execution.
    The pathfinder iteratively evaluates a user-defined number of samples (n_samples) to select the least costly contraction path; the search is accelerated with multithreading.
    The optimal path is then used in the actual contraction to produce a dense vector representation of the TN.
    """

    from mpi4py import MPI  # this import initializes MPI

    ncpu_threads = multiprocessing.cpu_count() // 2

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    # Bind each MPI rank to a GPU in round-robin fashion.
    device_id = rank % getDeviceCount()
    cp.cuda.Device(device_id).use()

    handle = cutn.create()
    network_opts = cutn.NetworkOptions(handle=handle, blocking="auto")
    # Attach the MPI communicator so cuTensorNet can coordinate work across ranks.
    cutn.distributed_reset_configuration(handle, *cutn.get_mpi_comm_pointer(comm))

    # Perform circuit conversion.
    myconvertor = QiboCircuitToEinsum(qibo_circ, dtype=datatype)
    operands_interleave = myconvertor.state_vector_operands()

    # Pathfinder: search for the optimal contraction path. The optimal path is
    # assigned to the path and info attributes of the network object.
    network = cutn.Network(*operands_interleave, options=network_opts)
    network.contract_path(
        optimize={
            "samples": n_samples,
            "threads": ncpu_threads,
            "slicing": {"min_slices": max(16, size)},
        }
    )

    # Execution: contract the network using the optimal path found above.

"""
path, opt_info = network.contract_path(optimize={"samples": n_samples, "threads": ncpu_threads, 'slicing': {'min_slices': max(16, size)}})
num_slices = opt_info.num_slices#Andy
chunk, extra = num_slices // size, num_slices % size#Andy
slice_begin = rank * chunk + min(rank, extra)#Andy
slice_end = num_slices if rank == size - 1 else (rank + 1) * chunk + min(rank + 1, extra)#Andy
slices = range(slice_begin, slice_end)#Andy
result = network.contract(slices=slices)
"""
    result = network.contract()

    cutn.destroy(handle)

    return result, rank


def eval_mps(qibo_circ, gate_algo, datatype):
    myconvertor = QiboCircuitToMPS(qibo_circ, gate_algo, dtype=datatype)
    mps_helper = MPSContractionHelper(myconvertor.num_qubits)
@@ -515,7 +441,6 @@ def eval_mps(qibo_circ, gate_algo, datatype):
        myconvertor.mps_tensors, {"handle": myconvertor.handle}
    )


def PauliStringGen(nqubits):
    if nqubits <= 0:
        return "Invalid input. N should be a positive integer."
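For context, below is a minimal sketch of how the removed eval_tn_MPI entry point was typically driven before this commit: the script is launched under mpiexec, each rank binds one GPU and contracts the network, and every rank gets back the dense state vector together with its rank id. The import path and function signature match the file shown above; the qibo circuit construction and the "complex128" datatype are illustrative assumptions, not part of this diff.

# Hypothetical driver for the removed eval_tn_MPI.
# Run with, e.g.: mpiexec -n 4 python run_tn_mpi.py
from qibo import gates
from qibo.models import Circuit

from qibotn.cutn import eval_tn_MPI  # the module shown in this diff

# Illustrative circuit (assumption: any qibo circuit works here).
circ = Circuit(4)
for q in range(4):
    circ.add(gates.H(q))

result, rank = eval_tn_MPI(circ, datatype="complex128", n_samples=8)
if rank == 0:
    print(result)  # dense state vector from the distributed contraction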
