const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder;
const AtomicRmwOp = builtin.AtomicRmwOp;

/// Many producer, many consumer, non-allocating, thread-safe, lock-free queue.
/// This implementation has a crippling limitation: it hangs onto node memory
/// for one extra get() and one extra put() operation. When get() returns a node,
/// that node must not be freed until both the next get() and the next put() complete.
pub fn QueueMpmc(comptime T: type) type {
    return struct {
        head: *Node,
        tail: *Node,
        root: Node,

        pub const Self = @This();

        pub const Node = struct {
            next: ?*Node,
            data: T,
        };

        /// TODO: well defined copy elision: https://github.com/ziglang/zig/issues/287
        pub fn init(self: *Self) void {
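            // The queue starts out containing only the stub node `root`;
            // head and tail both point at it, and head.next == null means empty.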
            self.root.next = null;
            self.head = &self.root;
            self.tail = &self.root;
        }

        pub fn put(self: *Self, node: *Node) void {
            node.next = null;

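            // Swing tail to the new node first, then link the previous tail to it.
            // Between the two exchanges the new node is not yet reachable from
            // head; a concurrent get() simply misses it until the second
            // exchange publishes the link.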
            const tail = @atomicRmw(*Node, &self.tail, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
            _ = @atomicRmw(?*Node, &tail.next, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
        }

        /// node must not be freed until both the next get() and the next put() complete
        pub fn get(self: *Self) ?*Node {
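            // head points at the stub or the most recently returned node; the
            // real front of the queue is head.next. Retry the weak CAS until we
            // either observe an empty queue or advance head by one node.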
            var head = @atomicLoad(*Node, &self.head, AtomicOrder.SeqCst);
            while (true) {
                const node = head.next orelse return null;
                head = @cmpxchgWeak(*Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return node;
            }
        }

        /// This is a debug function that is not thread-safe.
        pub fn dump(self: *Self) void {
            std.debug.warn("head: ");
            dumpRecursive(self.head, 0);
            std.debug.warn("tail: ");
            dumpRecursive(self.tail, 0);
        }

        fn dumpRecursive(optional_node: ?*Node, indent: usize) void {
            var stderr_file = std.io.getStdErr() catch return;
            const stderr = &std.io.FileOutStream.init(&stderr_file).stream;
            stderr.writeByteNTimes(' ', indent) catch return;
            if (optional_node) |node| {
                std.debug.warn("0x{x}={}\n", @ptrToInt(node), node.data);
                dumpRecursive(node.next, indent + 1);
            } else {
                std.debug.warn("(null)\n");
            }
        }
    };
}
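
// A minimal single-threaded usage sketch, for illustration only (the tests
// below exercise the queue more thoroughly; the fn name `usageSketch` is ours,
// not part of the API). Nodes are caller-owned: the queue never allocates.
fn usageSketch() void {
    var queue: QueueMpmc(i32) = undefined;
    queue.init();

    var node = QueueMpmc(i32).Node{
        .data = 42,
        .next = undefined,
    };
    queue.put(&node);

    assert(queue.get().?.data == 42);
    assert(queue.get() == null);
}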

const std = @import("std");
const assert = std.debug.assert;

const Context = struct {
    allocator: *std.mem.Allocator,
    queue: *QueueMpmc(i32),
    put_sum: isize,
    get_sum: isize,
    get_count: usize,
    puts_done: u8, // TODO make this a bool
};

// TODO: add lazily evaluated build options and then put puts_per_thread behind
// some option such as "AggressiveMultithreadedFuzzTest". In the AppVeyor CI we
// would use a less aggressive setting since, at 1 core, there is so much
// thrashing that a smaller value is needed for this test to pass. We would also
// use a less aggressive setting when running under Valgrind.
const puts_per_thread = 500;
const put_thread_count = 3;

test "std.atomic.queue_mpmc" {
    var direct_allocator = std.heap.DirectAllocator.init();
    defer direct_allocator.deinit();

    var plenty_of_memory = try direct_allocator.allocator.alloc(u8, 300 * 1024);
    defer direct_allocator.allocator.free(plenty_of_memory);

    var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
    var a = &fixed_buffer_allocator.allocator;

    var queue: QueueMpmc(i32) = undefined;
    queue.init();
    var context = Context{
        .allocator = a,
        .queue = &queue,
        .put_sum = 0,
        .get_sum = 0,
        .puts_done = 0,
        .get_count = 0,
    };

    var putters: [put_thread_count]*std.os.Thread = undefined;
    for (putters) |*t| {
        t.* = try std.os.spawnThread(&context, startPuts);
    }
    var getters: [put_thread_count]*std.os.Thread = undefined;
    for (getters) |*t| {
        t.* = try std.os.spawnThread(&context, startGets);
    }

    for (putters) |t|
        t.wait();
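    // All puts are complete; flip the flag so getters can exit after one final
    // drain. Xchg doubles as a SeqCst atomic store here.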
    _ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
    for (getters) |t|
        t.wait();

    if (context.put_sum != context.get_sum) {
        std.debug.panic("failure\nput_sum:{} != get_sum:{}", context.put_sum, context.get_sum);
    }

    if (context.get_count != puts_per_thread * put_thread_count) {
        std.debug.panic(
            "failure\nget_count:{} != puts_per_thread:{} * put_thread_count:{}",
            context.get_count,
            u32(puts_per_thread),
            u32(put_thread_count),
        );
    }
}

fn startPuts(ctx: *Context) u8 {
    var put_count: usize = puts_per_thread;
    var r = std.rand.DefaultPrng.init(0xdeadbeef);
    while (put_count != 0) : (put_count -= 1) {
        std.os.time.sleep(0, 1); // let the OS scheduler be our fuzzer
        const x = @bitCast(i32, r.random.scalar(u32));
        const node = ctx.allocator.create(QueueMpmc(i32).Node{
            .next = undefined,
            .data = x,
        }) catch unreachable;
        ctx.queue.put(node);
        _ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
    }
    return 0;
}

fn startGets(ctx: *Context) u8 {
    while (true) {
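        // Read the "done" flag before draining. If it was already set, every
        // node was published before this iteration began, so the drain below
        // sees them all and it is safe to return afterwards.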
        const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1;

        while (ctx.queue.get()) |node| {
            std.os.time.sleep(0, 1); // let the OS scheduler be our fuzzer
            _ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
            _ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
        }

        if (last) return 0;
    }
}

test "std.atomic.queue_mpmc single-threaded" {
    var queue: QueueMpmc(i32) = undefined;
    queue.init();

    var node_0 = QueueMpmc(i32).Node{
        .data = 0,
        .next = undefined,
    };
    queue.put(&node_0);

    var node_1 = QueueMpmc(i32).Node{
        .data = 1,
        .next = undefined,
    };
    queue.put(&node_1);

    assert(queue.get().?.data == 0);

    var node_2 = QueueMpmc(i32).Node{
        .data = 2,
        .next = undefined,
    };
    queue.put(&node_2);

    var node_3 = QueueMpmc(i32).Node{
        .data = 3,
        .next = undefined,
    };
    queue.put(&node_3);

    assert(queue.get().?.data == 1);

    assert(queue.get().?.data == 2);

    var node_4 = QueueMpmc(i32).Node{
        .data = 4,
        .next = undefined,
    };
    queue.put(&node_4);

    assert(queue.get().?.data == 3);
    // If we were to set node_3.next to null here, this test would fail,
    // because the queue still reads node_3.next on the next get(). This
    // demonstrates the hang-on-to-extra-memory limitation documented above.

    assert(queue.get().?.data == 4);

    assert(queue.get() == null);
}