From d194e49d8f03e17ca2e664bae865fcb6bb0f62be Mon Sep 17 00:00:00 2001
From: YiraSan
Date: Tue, 28 Jan 2025 17:51:24 +0100
Subject: [PATCH] test locks on cancellation

---
 src/coro/sync.zig | 148 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 148 insertions(+)

diff --git a/src/coro/sync.zig b/src/coro/sync.zig
index 8c28c64..1b73bf9 100644
--- a/src/coro/sync.zig
+++ b/src/coro/sync.zig
@@ -333,3 +333,151 @@ test "RwLock" {
     std.debug.assert(lock.counter == 0);
     std.debug.assert(lock.locked == false);
 }
+
+test "Mutex.Cancel" {
+    const Test = struct {
+        // Holds the mutex across a suspending io call so that cancellation
+        // can land mid-critical-section; the deferred unlock must still run.
+        fn incrementer(lock: *Mutex, value: *usize, check_value: *usize) !void {
+            while (true) {
+                try lock.lock();
+                defer lock.unlock();
+
+                value.* += 1000;
+
+                const stored = check_value.*;
+
+                // simulates a "workload" + makes coroutines try to lock
+                try coro.io.single(.timeout, .{ .ns = std.time.ns_per_ms });
+
+                check_value.* = stored + 1000;
+            }
+        }
+
+        // Flips the flag after a short delay so testThread stops ticking.
+        fn cancel(canceled: *bool) !void {
+            try coro.io.single(.timeout, .{ .ns = 16 * std.time.ns_per_ms });
+            canceled.* = true;
+        }
+
+        // Runs a scheduler full of incrementers, then cancels everything.
+        fn testThread(lock: *Mutex, value: *usize, check_value: *usize) !void {
+            var scheduler = try coro.Scheduler.init(std.testing.allocator, .{});
+            defer scheduler.deinit();
+
+            for (0..128) |_| {
+                _ = try scheduler.spawn(incrementer, .{ lock, value, check_value }, .{ .detached = true });
+            }
+
+            var canceled = false;
+            _ = try scheduler.spawn(cancel, .{&canceled}, .{ .detached = true });
+
+            while (!canceled) {
+                _ = try scheduler.tick(.blocking);
+            }
+
+            try scheduler.run(.cancel);
+        }
+    };
+
+    var lock = try Mutex.init();
+    defer lock.deinit();
+
+    var value: usize = 0;
+    var check_value: usize = 0;
+
+    var threads: [8]std.Thread = undefined;
+
+    for (0..8) |i| {
+        threads[i] = try std.Thread.spawn(.{}, Test.testThread, .{ &lock, &value, &check_value });
+    }
+
+    for (threads) |thread| {
+        thread.join();
+    }
+
+    try std.testing.expect(value == check_value);
+}
+
+test "RwLock.Cancel" {
+    const Test = struct {
+        // Same pattern as in Mutex.Cancel, but over the exclusive writer lock.
+        fn incrementer(lock: *RwLock, value: *usize, check_value: *usize) !void {
+            while (true) {
+                try lock.lock();
+                defer lock.unlock();
+
+                value.* += 1000;
+
+                const stored = check_value.*;
+
+                // simulates a "workload" + makes coroutines try to lock
+                try coro.io.single(.timeout, .{ .ns = std.time.ns_per_ms });
+
+                check_value.* = stored + 1000;
+            }
+        }
+
+        // Keeps taking the shared side of the lock to contend with writers.
+        fn lockSharer(lock: *RwLock) !void {
+            while (true) {
+                // simulates a "workload"
+                try coro.io.single(.timeout, .{ .ns = std.time.ns_per_ms });
+
+                try lock.lockShared();
+                defer lock.unlock();
+            }
+        }
+
+        // Flips the flag after a short delay so testThread stops ticking.
+        fn cancel(canceled: *bool) !void {
+            try coro.io.single(.timeout, .{ .ns = 16 * std.time.ns_per_ms });
+            canceled.* = true;
+        }
+
+        // Runs writers and readers on one scheduler, then cancels everything.
+        fn testThread(lock: *RwLock, value: *usize, check_value: *usize) !void {
+            var scheduler = try coro.Scheduler.init(std.testing.allocator, .{});
+            defer scheduler.deinit();
+
+            for (0..128) |_| {
+                _ = try scheduler.spawn(incrementer, .{ lock, value, check_value }, .{ .detached = true });
+            }
+
+            for (0..16) |_| {
+                _ = try scheduler.spawn(lockSharer, .{lock}, .{ .detached = true });
+            }
+
+            var canceled = false;
+            _ = try scheduler.spawn(cancel, .{&canceled}, .{ .detached = true });
+
+            while (!canceled) {
+                _ = try scheduler.tick(.blocking);
+            }
+
+            try scheduler.run(.cancel);
+        }
+    };
+
+    var lock = try RwLock.init();
+    defer lock.deinit();
+
+    var value: usize = 0;
+    var check_value: usize = 0;
+
+    var threads: [8]std.Thread = undefined;
+
+    for (0..8) |i| {
+        threads[i] = try std.Thread.spawn(.{}, Test.testThread, .{ &lock, &value, &check_value });
+    }
+
+    for (threads) |thread| {
+        thread.join();
+    }
+
+    try std.testing.expect(value == check_value);
+
+    // check if it has successfully returned to its initial state.
+    try std.testing.expect(lock.counter == 0);
+    try std.testing.expect(!lock.locked);
+}