008 event taps overhead

○ Planned: This feature is planned but not yet implemented.

Event tap code injection is not yet implemented.

Code (input.kz)

// PERFORMANCE TEST: Event taps overhead vs manual instrumentation
// Goal: Prove taps have same cost as manual function calls
// Loop 1 million times with 5 tap points
// Baseline: Zig with manual function calls at same points
// Threshold: 1.05x (5% slower max)

const std = @import("std");

// Counter for tap calls (verify taps actually run)
var tap_count: u64 = 0;

// Event for loop iteration
~event count { i: u64 }
| next { i: u64 }
| done {}

~proc count {
    if (i < 1_000_000) {
        return .{ .next = .{ .i = i + 1 } };
    } else {
        return .{ .done = .{} };
    }
}

// Tap target - simulates logging/metrics
~event tap_point { i: u64 }
| done {}

~proc tap_point {
    tap_count += 1;
    // Touch parameter to avoid unused warning
    if (i > 999_999_999) {
        std.debug.print("Impossible\n", .{});
    }
    return .{ .done = .{} };
}

// Print result
~event print_result {}
| done {}

~proc print_result {
    std.debug.print("Taps called: {}\n", .{tap_count});
    return .{ .done = .{} };
}

// Taps: 5 tap points observing the loop; each fires only when its modulus
// matches (roughly 5 calls per tap across the 1M iterations)
~count -> * | next n where n.i % 200_000 == 0 |> tap_point(i: n.i) | done |> _
~count -> * | next n where n.i % 200_001 == 0 |> tap_point(i: n.i) | done |> _
~count -> * | next n where n.i % 200_002 == 0 |> tap_point(i: n.i) | done |> _
~count -> * | next n where n.i % 200_003 == 0 |> tap_point(i: n.i) | done |> _
~count -> * | next n where n.i % 200_004 == 0 |> tap_point(i: n.i) | done |> _

// Main loop
~#loop count(i: 0)
| next n |> @loop(i: n.i)
| done |> print_result() | done |> _
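
The baseline referenced above ("Zig with manual function calls at same points") is not part of this spec yet. The following is a minimal sketch of what it could look like, mirroring the counter, the five modulus checks, and the final print; the file name and exact harness are assumptions.

// baseline.zig (sketch) - manual instrumentation at the same five points
const std = @import("std");

var tap_count: u64 = 0;

fn tapPoint(i: u64) void {
    tap_count += 1;
    // Touch parameter to avoid unused warning (mirrors the Koru tap_point proc)
    if (i > 999_999_999) {
        std.debug.print("Impossible\n", .{});
    }
}

pub fn main() void {
    var i: u64 = 0;
    while (i < 1_000_000) : (i += 1) {
        if (i % 200_000 == 0) tapPoint(i);
        if (i % 200_001 == 0) tapPoint(i);
        if (i % 200_002 == 0) tapPoint(i);
        if (i % 200_003 == 0) tapPoint(i);
        if (i % 200_004 == 0) tapPoint(i);
    }
    std.debug.print("Taps called: {}\n", .{tap_count});
}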

Test Configuration

MUST_RUN

Post-validation Script:

#!/bin/bash
# Post-validation: Check performance is within threshold

set -e

if [ ! -f "results.json" ]; then
    echo "⚠️  No benchmark results found (results.json missing)"
    echo "   Running benchmark..."
    bash benchmark.sh
fi

if [ ! -f "results.json" ]; then
    echo "❌ FAIL: Benchmark did not produce results.json"
    exit 1
fi

# Check if jq is installed
if ! command -v jq &> /dev/null; then
    echo "⚠️  jq not installed (needed to parse benchmark results)"
    echo "   Install with: brew install jq (macOS) or apt install jq (Linux)"
    echo "   Skipping performance validation..."
    exit 0
fi

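# Allowed slowdown ratio, read from the THRESHOLD file (1.05 per the spec comment in input.kz)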
THRESHOLD=$(cat THRESHOLD)

# Parse results (hyperfine format)
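# NOTE: assumes benchmark.sh exports the Zig baseline as results[0] and Koru as
# results[1] (see the benchmark.sh sketch below)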
BASELINE_TIME=$(jq -r '.results[0].mean' results.json)
KORU_TIME=$(jq -r '.results[1].mean' results.json)

# Calculate ratio (Koru / Baseline)
RATIO=$(echo "scale=4; $KORU_TIME / $BASELINE_TIME" | bc -l)

echo ""
echo "Performance Results:"
echo "  Baseline (Zig): ${BASELINE_TIME}s"
echo "  Koru:           ${KORU_TIME}s"
echo "  Ratio:          ${RATIO}x"
echo "  Threshold:      ${THRESHOLD}x"
echo ""

# Compare to threshold
if (( $(echo "$RATIO > $THRESHOLD" | bc -l) )); then
    echo "❌ PERFORMANCE REGRESSION!"
    echo "   Koru is ${RATIO}x slower than baseline"
    echo "   Threshold is ${THRESHOLD}x"
    echo "   Regression: $(echo "scale=1; ($RATIO - 1) * 100" | bc -l)%"
    exit 1
elif (( $(echo "$RATIO < 0.95" | bc -l) )); then
    echo "✅ PERFORMANCE IMPROVED!"
    echo "   Koru is FASTER than baseline (${RATIO}x)"
else
    echo "✅ Performance within threshold"
    echo "   Overhead: $(echo "scale=1; ($RATIO - 1) * 100" | bc -l)%"
fi

exit 0
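
The post-validation script assumes benchmark.sh produces results.json in hyperfine's JSON export format, with the Zig baseline as the first result and the Koru build as the second. A minimal sketch under those assumptions; the binary names are placeholders, since the actual build steps are not defined yet.

#!/bin/bash
# benchmark.sh (sketch): produce results.json for the post-validation script.
# "./baseline_zig" and "./taps_koru" are placeholder binary names.
set -e

# Order matters: the post-validation script reads .results[0] as the baseline
# and .results[1] as the Koru build.
hyperfine \
    --warmup 3 \
    --export-json results.json \
    './baseline_zig' \
    './taps_koru'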