Skip to content

Commit

Permalink
Update unit tests per feedback
Browse files Browse the repository at this point in the history
  • Loading branch information
IsaevIlya committed Apr 5, 2024
1 parent 4016ad7 commit 654d6e0
Show file tree
Hide file tree
Showing 2 changed files with 28 additions and 32 deletions.
2 changes: 1 addition & 1 deletion s3torchconnector/src/s3torchconnector/s3reader.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ def prefetch(self) -> None:

def readinto(self, buf) -> int:
"""Read up to len(buf) bytes into a pre-allocated, writable bytes-like object buf.
Return the number of bytes read. If no bytes available, zero is returned.
Return the number of bytes read. If no bytes are available, zero is returned.
Args:
buf : writable bytes-like object
Expand Down
58 changes: 27 additions & 31 deletions s3torchconnector/tst/unit/test_s3reader.py
Original file line number Diff line number Diff line change
Expand Up @@ -306,8 +306,8 @@ def test_s3reader_writes_size_after_read_all_explicit(stream: List[bytes]):


@given(
lists(binary(min_size=2, max_size=3), min_size=0, max_size=3),
integers(min_value=0, max_value=1),
lists(binary(min_size=20, max_size=30), min_size=0, max_size=2),
integers(min_value=0, max_value=10),
)
def test_s3reader_readinto_buffer_smaller_then_chunks(
stream: List[bytes], buf_size: int
Expand All @@ -330,45 +330,41 @@ def test_s3reader_readinto_buffer_smaller_then_chunks(


@given(
lists(binary(min_size=2, max_size=3), min_size=1, max_size=3),
integers(min_value=3, max_value=10),
lists(binary(min_size=20, max_size=30), min_size=2, max_size=3),
integers(min_value=30, max_value=40),
)
def test_s3reader_readinto_buffer_bigger_then_chunks(
stream: List[bytes], buf_size: int
):
s3reader = S3Reader(TEST_BUCKET, TEST_KEY, lambda: None, lambda: iter(stream))
assert s3reader._size is None
total_length = sum(map(len, stream))
buf = memoryview(bytearray(buf_size))
should_read_bytes_in_first_pass = min(buf_size, total_length)
# We're able to read all the available data or the data that can be accommodated in buf
assert s3reader.readinto(buf) == should_read_bytes_in_first_pass
assert s3reader.tell() == should_read_bytes_in_first_pass
# We're able to read the data that can be accommodated in buf
assert s3reader.readinto(buf) == buf_size
assert s3reader.tell() == buf_size
all_data = b"".join(stream)
# confirm that read data is the same as in source
assert (
buf[:should_read_bytes_in_first_pass]
== all_data[:should_read_bytes_in_first_pass]
)
if total_length < buf_size:
assert s3reader._size == total_length

should_read_bytes_in_second_pass = max(
min(buf_size, total_length - should_read_bytes_in_first_pass), 0
)
if should_read_bytes_in_second_pass > 0:
# We're able to read all the available data or the data that can be accommodated in buf
assert s3reader.readinto(buf) == should_read_bytes_in_second_pass
total_read = should_read_bytes_in_first_pass + should_read_bytes_in_second_pass
assert s3reader.tell() == total_read
# confirm that read data is the same as in source
assert (
buf[:should_read_bytes_in_second_pass]
== all_data[should_read_bytes_in_first_pass:total_read]
)
if total_length < total_read:
assert s3reader._size == total_read
assert (buf == all_data[:buf_size])


@given(
    lists(binary(min_size=20, max_size=30), min_size=1, max_size=3),
    integers(min_value=100, max_value=100),
)
def test_s3reader_readinto_buffer_bigger_then_whole_object(
    stream: List[bytes], buf_size: int
):
    """A single readinto with a buffer larger than the whole object drains it fully.

    The strategies guarantee buf_size (always 100) exceeds the object size
    (at most 3 chunks of 30 bytes), so one call must reach EOF.
    """
    reader = S3Reader(TEST_BUCKET, TEST_KEY, lambda: None, lambda: iter(stream))
    # Size is lazily discovered; nothing has been read yet.
    assert reader._size is None
    expected = b"".join(stream)
    object_size = len(expected)
    target = memoryview(bytearray(buf_size))
    # One readinto call consumes every available byte of the object.
    assert reader.readinto(target) == object_size
    assert reader.tell() == object_size
    # The bytes copied into the buffer match the source stream.
    assert target[:object_size] == expected
    # Having hit EOF, the reader now knows the object's total size.
    assert reader._size == object_size

@given(
lists(binary(min_size=2, max_size=12), min_size=1, max_size=5),
Expand Down

0 comments on commit 654d6e0

Please sign in to comment.