Skip to content

Commit

Permalink
add comments and remove unused method
Browse files Browse the repository at this point in the history
  • Loading branch information
tohtana committed Aug 15, 2024
1 parent e9b03cf commit 7fe47d0
Showing 1 changed file with 4 additions and 14 deletions.
18 changes: 4 additions & 14 deletions deepspeed/runtime/pipe/engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@
# Timer keys — presumably used to time pipeline receive operations for
# activations and gradients respectively (TODO confirm against the timer calls).
PIPE_RECV_INPUT_TIMER = 'pipe_recv_input'
PIPE_RECV_GRAD_TIMER = 'pipe_recv_grad'

# The buffer size (in int32 elements) used to store the metadata for each tensor.
TENSOR_META_SIZE = 256


Expand Down Expand Up @@ -937,7 +938,7 @@ def _send_tensor_meta(self, buffer, recv_stage):
meta_buffer = torch.empty(TENSOR_META_SIZE, dtype=torch.int32, device=self.device)
if isinstance(buffer, torch.Tensor):
meta_buf_list = [
0, # type of data (0: tensor, 1: list, 2: tuple)
0, # type of data (0: tensor, 1: list (unused), 2: tuple)
self.DTYPE_TO_ID[buffer.dtype], # dtype
len(buffer.size()) # ndims
]
Expand All @@ -950,7 +951,7 @@ def _send_tensor_meta(self, buffer, recv_stage):

elif isinstance(buffer, tuple):
meta_buf_list = [
2, # type of data (0: tensor, 1: list, 2: tuple)
2, # type of data (0: tensor, 1: list (unused), 2: tuple)
len(buffer) # num_tensors
]

Expand Down Expand Up @@ -993,7 +994,7 @@ def _recv_tensor_meta(self, send_stage):
recv_shape = buffer[3:3 + recv_ndims].tolist()
return self._allocate_or_extend_buffers(0, recv_shape, recv_dtype)

# List or tuple of tensors
# List or tuple of tensors (recv_type == 1 (list) is currently unused)
elif recv_type == 1 or recv_type == 2:
num_tensors = buffer[1].item()

Expand Down Expand Up @@ -1278,17 +1279,6 @@ def _allocate_buffer(self, shape, num_buffers=-1, **kwargs):
buffers.append(self._allocate_zeros(shape, **kwargs))
return buffers

def _allocate_buffers(self, shapes_and_dtypes, requires_grad=False, num_buffers=-1):
    """Allocate groups of zero-initialized tensors.

    Builds `num_buffers` groups, each containing one zeroed tensor per
    (shape, dtype) pair in `shapes_and_dtypes`, allocated via
    `self._allocate_zeros`.

    Args:
        shapes_and_dtypes: iterable of (shape, dtype) pairs, one per tensor.
        requires_grad: forwarded to `self._allocate_zeros` for each tensor.
        num_buffers: number of groups to allocate; -1 means use
            `self.num_pipe_buffers`.

    Returns:
        A list of `num_buffers` lists of tensors.
    """
    total = self.num_pipe_buffers if num_buffers == -1 else num_buffers
    return [
        [
            self._allocate_zeros(shape, dtype=dtype, requires_grad=requires_grad)
            for shape, dtype in shapes_and_dtypes
        ]
        for _ in range(total)
    ]

def _allocate_or_extend_buffers(self, idx, shape, dtype):
numel = reduce(mul, shape) if len(shape) > 0 else 1
if len(self._grad_layer_buf) <= idx or self._grad_layer_buf[idx].numel() < numel:
Expand Down

0 comments on commit 7fe47d0

Please sign in to comment.