Skip to content

Commit

Permalink
Fix a couple issues with the object storage service benchmark.
Browse files Browse the repository at this point in the history
1. Fix enums to render correctly in Python 3.11 (fixes #5427)
2. Fix cleanup for scenarios besides api_multistream.
3. Default scenario to api_multistream.

We have only been running with scenario=api_multistream for a number of years and that has caused bugs to accumulate in the other scenarios.

PiperOrigin-RevId: 718545259
  • Loading branch information
pmkc authored and copybara-github committed Jan 30, 2025
1 parent 0534c43 commit cfcff9c
Show file tree
Hide file tree
Showing 2 changed files with 21 additions and 13 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,7 @@

flags.DEFINE_enum(
'object_storage_scenario',
'all',
'api_multistream',
[
'all',
'cli',
Expand Down Expand Up @@ -406,14 +406,15 @@
_SECONDS_PER_HOUR = 60 * 60


class MultistreamOperationType(enum.Enum):
class MultistreamOperationType(enum.StrEnum):
"""MultiStream Operations supported by object_storage_api_tests script."""

download = 1
upload = 2
delete = 3
bulk_delete = 4
redownload = 5
# pylint: disable=invalid-name
download = enum.auto()
upload = enum.auto()
delete = enum.auto()
bulk_delete = enum.auto()
redownload = enum.auto()


def GetConfig(user_config):
Expand Down Expand Up @@ -1372,6 +1373,17 @@ def MultiStreamRWBenchmark(
)
logging.info('Finished multi-stream re-read test.')

# Pre-cleanup here, where we know what the files are.
# Also records delete latencies, even though that's not really documented.
keep_bucket = (
FLAGS.object_storage_objects_written_file_prefix is not None
or FLAGS.object_storage_dont_delete_bucket
)
if not keep_bucket:
MultiStreamDelete(
results, metadata, vms, command_builder, service, bucket_name
)


def MultiStreamWriteBenchmark(
results, metadata, vms, command_builder, service, bucket_name
Expand Down Expand Up @@ -1983,16 +1995,12 @@ def Run(benchmark_spec):
)

# Clear the bucket if we're not saving the objects for later
# This is needed for long running tests, or else the objects would just pile
# up after each run.
keep_bucket = (
FLAGS.object_storage_objects_written_file_prefix is not None
or FLAGS.object_storage_dont_delete_bucket
)
if not keep_bucket:
MultiStreamDelete(
results, metadata, vms, command_builder, service, bucket_name
)
service.EmptyBucket(bucket_name)

service.UpdateSampleMetadata(results)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1351,7 +1351,7 @@ def ListConsistencyBenchmark(service):
for i in range(LIST_CONSISTENCY_THREAD_COUNT):
for j in range(len(per_thread_objects_written[i])):
# Delete about 30% of the objects written so far.
if random.Random() < LIST_AFTER_UPDATE_DELETION_RATIO:
if random.random() < LIST_AFTER_UPDATE_DELETION_RATIO:
per_thread_objects_to_delete[i].append(per_thread_objects_written[i][j])

# Now issue the delete concurrently.
Expand Down

0 comments on commit cfcff9c

Please sign in to comment.