aboutsummaryrefslogtreecommitdiffstatshomepage
path: root/exercises/104_threading.zig
diff options
context:
space:
mode:
authorChris Boesch <chrboesch@noreply.codeberg.org>2024-03-05 09:15:57 +0100
committerChris Boesch <chrboesch@noreply.codeberg.org>2024-03-05 09:15:57 +0100
commit6984345d0af6fb88438ad260fb9146bc8c36466b (patch)
tree0a96910870f113d4ee266e2e27c46c2bb77dd66e /exercises/104_threading.zig
parent277304454a8f18b65985d61cc62c01d00b9dc2d0 (diff)
Added threading exercise
Diffstat (limited to 'exercises/104_threading.zig')
-rw-r--r--exercises/104_threading.zig129
1 files changed, 129 insertions, 0 deletions
diff --git a/exercises/104_threading.zig b/exercises/104_threading.zig
new file mode 100644
index 0000000..8aeb683
--- /dev/null
+++ b/exercises/104_threading.zig
@@ -0,0 +1,129 @@
+//
+// Whenever there is a lot to calculate, the question arises as to how
+// tasks can be carried out simultaneously. We have already learned about
+// one possibility, namely asynchronous processes, in Exercises 84-91.
+//
+// However, the processor's computing power is merely divided among the
+// started tasks, an approach that reaches its limits whenever raw
+// computing power is demanded.
+//
+// For example, in blockchains based on proof of work, the miners have
+// to find a nonce for a certain character string so that the first m bits
+// in the hash of the character string and the nonce are zeros.
+// As the miner who can solve the task first receives the reward, everyone
+// tries to complete the calculations as quickly as possible.
+//
+// This is where multithreading comes into play: tasks are actually
+// distributed across several cores of the CPU or GPU, which can
+// genuinely multiply the available performance.
+//
+// The following diagram roughly illustrates the difference between the
+// various types of process execution.
+// The 'Overall Time' column is intended to illustrate how the time is
+// affected if, instead of one core as in synchronous and asynchronous
+// processing, a second core now helps to complete the work in multithreading.
+//
+// In the ideal case shown, execution takes only half the time compared
+// to the synchronous single thread. And even asynchronous processing
+// is only slightly faster in comparison.
+//
+//
+// Synchronous Asynchronous
+// Processing Processing Multithreading
+// ┌──────────┐ ┌──────────┐ ┌──────────┐ ┌──────────┐
+// │ Thread 1 │ │ Thread 1 │ │ Thread 1 │ │ Thread 2 │
+// ├──────────┤ ├──────────┤ ├──────────┤ ├──────────┤ Overall Time
+// └──┼┼┼┼┼───┴─┴──┼┼┼┼┼───┴──┴──┼┼┼┼┼───┴─┴──┼┼┼┼┼───┴──┬───────┬───────┬──
+// ├───┤ ├───┤ ├───┤ ├───┤ │ │ │
+// │ T │ │ T │ │ T │ │ T │ │ │ │
+// │ a │ │ a │ │ a │ │ a │ │ │ │
+// │ s │ │ s │ │ s │ │ s │ │ │ │
+// │ k │ │ k │ │ k │ │ k │ │ │ │
+// │ │ │ │ │ │ │ │ │ │ │
+// │ 1 │ │ 1 │ │ 1 │ │ 3 │ │ │ │
+// └─┬─┘ └─┬─┘ └─┬─┘ └─┬─┘ │ │ │
+// │ │ │ │ 5 Sec │ │
+// ┌────┴───┐ ┌─┴─┐ ┌─┴─┐ ┌─┴─┐ │ │ │
+// │Blocking│ │ T │ │ T │ │ T │ │ │ │
+// └────┬───┘ │ a │ │ a │ │ a │ │ │ │
+// │ │ s │ │ s │ │ s │ │ 8 Sec │
+// ┌─┴─┐ │ k │ │ k │ │ k │ │ │ │
+// │ T │ │ │ │ │ │ │ │ │ │
+// │ a │ │ 2 │ │ 2 │ │ 4 │ │ │ │
+// │ s │ └─┬─┘ ├───┤ ├───┤ │ │ │
+// │ k │ │ │┼┼┼│ │┼┼┼│ ▼ │ 10 Sec
+// │ │ ┌─┴─┐ └───┴────────┴───┴───────── │ │
+// │ 1 │ │ T │ │ │
+// └─┬─┘ │ a │ │ │
+// │ │ s │ │ │
+// ┌─┴─┐ │ k │ │ │
+// │ T │ │ │ │ │
+// │ a │ │ 1 │ │ │
+// │ s │ ├───┤ │ │
+// │ k │ │┼┼┼│ ▼ │
+// │ │ └───┴──────────────────────────────────────────── │
+// │ 2 │ │
+// ├───┤ │
+// │┼┼┼│ ▼
+// └───┴────────────────────────────────────────────────────────────────
+//
+//
+// The diagram was modeled on the one in a blog in which the differences
+// between asynchronous processing and multithreading are explained in detail:
+// https://blog.devgenius.io/multi-threading-vs-asynchronous-programming-what-is-the-difference-3ebfe1179a5
+//
+// Our exercise is essentially about clarifying the approach in Zig and
+// therefore we try to keep it as simple as possible.
+// Multithreading in itself is already difficult enough. ;-)
+//
+const std = @import("std");
+
+pub fn main() !void {
+ // This is where the preparatory work takes place
+ // before the parallel processing begins.
+ std.debug.print("Starting work...\n", .{});
+
+ // These curly brackets are important: they delimit the scope in which
+ // the threads are spawned. The deferred join() calls below run when
+ // this scope is left, so the program waits there for the threads to
+ // finish; without the braces the joins would only run at the very end
+ // of main().
+ {
+ // Now we start the first thread, with the number as parameter
+ // spawn(config, function, args): `.{}` selects the default spawn
+ // configuration, and the trailing tuple holds the arguments passed
+ // to thread_function.
+ const handle = try std.Thread.spawn(.{}, thread_function, .{1});
+
+ // Waits for the thread to complete,
+ // then deallocates any resources created on `spawn()`.
+ defer handle.join();
+
+ // Second thread
+ // NOTE(review): thread_function takes a usize (see below), so a
+ // negative literal cannot be coerced to it -- this is the deliberate
+ // gap the exercise asks the student to fix.
+ const handle2 = try std.Thread.spawn(.{}, thread_function, .{-4}); // that can't be right?
+ defer handle2.join();
+
+ // Third thread
+ // NOTE(review): the second deliberate gap -- the handling that the
+ // first two handles received is missing here.
+ const handle3 = try std.Thread.spawn(.{}, thread_function, .{3});
+ defer ??? // <-- something is missing
+
+ // After the threads have been started,
+ // they run in parallel and we can still do some work in between.
+ // sleep() expects nanoseconds; ns_per_s converts 1 second.
+ std.time.sleep((1) * std.time.ns_per_s);
+ std.debug.print("Some weird stuff, after starting the threads.\n", .{});
+ }
+ // Leaving the braced scope triggers the deferred join() calls, so all
+ // spawned threads are guaranteed to have finished before this line.
+ std.debug.print("Zig is cool!\n", .{});
+}
+
+// This function is started with every thread that we set up.
+// In our example, we pass the number of the thread as a parameter.
+//
+// num: the thread's number; it also determines the sleep duration below.
+// Returns !void, although this body itself never produces an error.
+fn thread_function(num: usize) !void {
+ std.debug.print("thread {d}: {s}\n", .{ num, "started." });
+ // Sleep for (5 - num % 3) seconds so the threads finish at different
+ // times (num 1 -> 4 s, num 2 -> 3 s, num 3 -> 5 s).
+ std.time.sleep((5 - num % 3) * std.time.ns_per_s);
+ std.debug.print("thread {d}: {s}\n", .{ num, "finished." });
+}
+// This is the easiest way to run threads in parallel.
+// In general, however, more management effort is required,
+// e.g. by setting up a pool and allowing the threads to communicate
+// with each other using semaphores.
+//
+// But that's a topic for another exercise.