Moonshark/core/workers/workers_test.go
2025-03-06 06:23:17 -06:00

446 lines
11 KiB
Go

package workers
import (
	"context"
	"errors"
	"testing"
	"time"

	luajit "git.sharkk.net/Sky/LuaJIT-to-Go"
)
// createTestBytecode compiles the given Lua source into genuine LuaJIT
// bytecode. No mocks: the tests feed the pool bytecode produced exactly the
// way production code produces it.
func createTestBytecode(t *testing.T, code string) []byte {
	vm := luajit.New()
	if vm == nil {
		t.Fatal("Failed to create Lua state")
	}
	defer vm.Close()

	bc, err := vm.CompileBytecode(code, "test")
	if err != nil {
		t.Fatalf("Failed to compile test bytecode: %v", err)
	}
	return bc
}
// TestNewPool checks pool construction: a positive worker count succeeds and
// yields a usable pool, while zero or negative counts are rejected.
func TestNewPool(t *testing.T) {
	cases := []struct {
		name      string
		workers   int
		expectErr bool
	}{
		{"valid workers", 4, false},
		{"zero workers", 0, true},
		{"negative workers", -1, true},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			pool, err := NewPool(tc.workers)

			if tc.expectErr {
				if err == nil {
					t.Errorf("Expected error for %d workers, got nil", tc.workers)
				}
				return
			}

			if err != nil {
				t.Errorf("Unexpected error: %v", err)
			}
			if pool == nil {
				t.Errorf("Expected non-nil pool")
				return
			}
			pool.Shutdown()
		})
	}
}
// TestPoolSubmit exercises the basic submission path: a trivial script that
// returns 42 must come back from the pool as the float64 value 42.
func TestPoolSubmit(t *testing.T) {
	pool, err := NewPool(2)
	if err != nil {
		t.Fatalf("Failed to create pool: %v", err)
	}
	defer pool.Shutdown()

	result, err := pool.Submit(createTestBytecode(t, "return 42"), nil)
	if err != nil {
		t.Fatalf("Failed to submit job: %v", err)
	}

	// Lua numbers arrive in Go as float64.
	num, ok := result.(float64)
	switch {
	case !ok:
		t.Fatalf("Expected float64 result, got %T", result)
	case num != 42:
		t.Errorf("Expected 42, got %f", num)
	}
}
// TestPoolSubmitWithContext verifies context-based timeouts: a deadline long
// enough for the script to finish yields its result, while a short deadline
// cancels the job and surfaces an error.
func TestPoolSubmitWithContext(t *testing.T) {
	pool, err := NewPool(2)
	if err != nil {
		t.Fatalf("Failed to create pool: %v", err)
	}
	defer pool.Shutdown()

	// Busy-wait for ~200ms of CPU time. os.clock has sub-second resolution,
	// unlike os.time/os.difftime whose 1-second granularity made the wait
	// last anywhere from ~0s to 1s, so both assertions below were flaky.
	bytecode := createTestBytecode(t, `
		local start = os.clock()
		while os.clock() - start < 0.2 do end
		return "done"
	`)

	// A 1-second deadline comfortably exceeds the ~200ms of work.
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()
	result, err := pool.SubmitWithContext(ctx, bytecode, nil)
	if err != nil {
		t.Fatalf("Unexpected error with sufficient timeout: %v", err)
	}
	if result != "done" {
		t.Errorf("Expected 'done', got %v", result)
	}

	// A 50ms deadline expires while the script is still spinning.
	ctx, cancel = context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	if _, err = pool.SubmitWithContext(ctx, bytecode, nil); err == nil {
		t.Errorf("Expected timeout error, got nil")
	}
}
// TestContextValues verifies that numbers, strings, booleans, and arrays set
// on the Go side arrive intact in Lua and survive the round trip back to Go.
func TestContextValues(t *testing.T) {
	pool, err := NewPool(2)
	if err != nil {
		t.Fatalf("Failed to create pool: %v", err)
	}
	defer pool.Shutdown()

	bytecode := createTestBytecode(t, `
		return {
			num = ctx.number,
			str = ctx.text,
			flag = ctx.enabled,
			list = {ctx.table[1], ctx.table[2], ctx.table[3]},
		}
	`)

	execCtx := NewContext()
	for key, val := range map[string]any{
		"number":  42.5,
		"text":    "hello",
		"enabled": true,
		"table":   []float64{10, 20, 30},
	} {
		execCtx.Set(key, val)
	}

	result, err := pool.Submit(bytecode, execCtx)
	if err != nil {
		t.Fatalf("Failed to submit job: %v", err)
	}

	// The script returns a Lua table, which surfaces in Go as a map.
	got, ok := result.(map[string]any)
	if !ok {
		t.Fatalf("Expected map result, got %T", result)
	}

	if got["num"] != 42.5 {
		t.Errorf("Expected num=42.5, got %v", got["num"])
	}
	if got["str"] != "hello" {
		t.Errorf("Expected str=hello, got %v", got["str"])
	}
	if got["flag"] != true {
		t.Errorf("Expected flag=true, got %v", got["flag"])
	}

	list, ok := got["list"].([]float64)
	if !ok {
		t.Fatalf("Expected []float64, got %T", got["list"])
	}
	for i, want := range []float64{10, 20, 30} {
		if list[i] != want {
			t.Errorf("Expected list[%d]=%f, got %f", i, want, list[i])
		}
	}
}
// TestNestedContext confirms that nested map values stored in the context are
// reachable from Lua via chained field access (ctx.params.id, etc.).
func TestNestedContext(t *testing.T) {
	pool, err := NewPool(2)
	if err != nil {
		t.Fatalf("Failed to create pool: %v", err)
	}
	defer pool.Shutdown()

	bytecode := createTestBytecode(t, `
		return {
			id = ctx.params.id,
			name = ctx.params.name,
			method = ctx.request.method,
			path = ctx.request.path
		}
	`)

	execCtx := NewContext()
	// Route parameters and request metadata, each nested one level deep.
	execCtx.Set("params", map[string]any{
		"id":   "123",
		"name": "test",
	})
	execCtx.Set("request", map[string]any{
		"method": "GET",
		"path":   "/api/test",
	})

	result, err := pool.Submit(bytecode, execCtx)
	if err != nil {
		t.Fatalf("Failed to submit job: %v", err)
	}

	got, ok := result.(map[string]any)
	if !ok {
		t.Fatalf("Expected map result, got %T", result)
	}

	if got["id"] != "123" {
		t.Errorf("Expected id=123, got %v", got["id"])
	}
	if got["name"] != "test" {
		t.Errorf("Expected name=test, got %v", got["name"])
	}
	if got["method"] != "GET" {
		t.Errorf("Expected method=GET, got %v", got["method"])
	}
	if got["path"] != "/api/test" {
		t.Errorf("Expected path=/api/test, got %v", got["path"])
	}
}
// TestStateReset ensures worker state is wiped between executions: a global
// set by one script must not be visible to the next script run on the same
// worker.
func TestStateReset(t *testing.T) {
	// A single worker guarantees both jobs reuse the same Lua state.
	pool, err := NewPool(1)
	if err != nil {
		t.Fatalf("Failed to create pool: %v", err)
	}
	defer pool.Shutdown()

	// First job plants a global; second job reports whether it survived.
	bytecode1 := createTestBytecode(t, `
		global_var = "should be cleared"
		return true
	`)
	bytecode2 := createTestBytecode(t, `
		return global_var ~= nil
	`)

	if _, err = pool.Submit(bytecode1, nil); err != nil {
		t.Fatalf("Failed to submit first job: %v", err)
	}

	result, err := pool.Submit(bytecode2, nil)
	if err != nil {
		t.Fatalf("Failed to submit second job: %v", err)
	}

	// Checked assertion: a non-bool result now fails the test cleanly
	// instead of panicking as the bare result.(bool) did.
	leaked, ok := result.(bool)
	if !ok {
		t.Fatalf("Expected bool result, got %T", result)
	}
	if leaked {
		t.Errorf("Expected global_var to be cleared, but it still exists")
	}
}
// TestPoolShutdown verifies clean shutdown semantics: jobs succeed before
// shutdown, submissions after shutdown fail with ErrPoolClosed, and a second
// shutdown also reports ErrPoolClosed.
func TestPoolShutdown(t *testing.T) {
	pool, err := NewPool(2)
	if err != nil {
		t.Fatalf("Failed to create pool: %v", err)
	}

	// Sanity-check the pool works before shutting it down.
	bytecode := createTestBytecode(t, "return 42")
	if _, err = pool.Submit(bytecode, nil); err != nil {
		t.Fatalf("Failed to submit job: %v", err)
	}

	if err := pool.Shutdown(); err != nil {
		t.Errorf("Shutdown failed: %v", err)
	}

	// errors.Is keeps these checks correct even if the pool ever wraps the
	// sentinel; a bare ==/!= comparison would silently break in that case.
	if _, err = pool.Submit(bytecode, nil); !errors.Is(err, ErrPoolClosed) {
		t.Errorf("Expected ErrPoolClosed, got %v", err)
	}
	if err := pool.Shutdown(); !errors.Is(err, ErrPoolClosed) {
		t.Errorf("Expected ErrPoolClosed on second shutdown, got %v", err)
	}
}
// TestErrorHandling covers the pool's failure modes: invalid bytecode, Lua
// runtime errors, a nil context (which must work fine), and unsupported
// context value types (which must error out).
func TestErrorHandling(t *testing.T) {
	pool, err := NewPool(2)
	if err != nil {
		t.Fatalf("Failed to create pool: %v", err)
	}
	defer pool.Shutdown()

	// Garbage bytes must be rejected, not executed.
	if _, err = pool.Submit([]byte("not valid bytecode"), nil); err == nil {
		t.Errorf("Expected error for invalid bytecode, got nil")
	}

	// A Lua error() call must surface as a Go error.
	bytecode := createTestBytecode(t, `
		error("intentional error")
		return true
	`)
	if _, err = pool.Submit(bytecode, nil); err == nil {
		t.Errorf("Expected error from Lua error() call, got nil")
	}

	// A nil execution context is legal and appears as nil inside Lua.
	bytecode = createTestBytecode(t, "return ctx == nil")
	result, err := pool.Submit(bytecode, nil)
	if err != nil {
		t.Errorf("Unexpected error with nil context: %v", err)
	} else if isNil, ok := result.(bool); !ok || !isNil {
		// Checked assertion: the original bare result.(bool) panicked on a
		// nil result (after the non-fatal Errorf above) instead of failing.
		t.Errorf("Expected ctx to be nil in Lua, but it wasn't")
	}

	// Unsupported Go types in the context must be reported, not smuggled in.
	execCtx := NewContext()
	execCtx.Set("param", complex128(1+2i)) // Unsupported type
	bytecode = createTestBytecode(t, "return ctx.param")
	if _, err = pool.Submit(bytecode, execCtx); err == nil {
		t.Errorf("Expected error for unsupported context value type, got nil")
	}
}
// TestConcurrentExecution hammers the pool with simultaneous submissions and
// verifies every job completes with its own distinct result.
func TestConcurrentExecution(t *testing.T) {
	const workers = 4
	const jobs = 20

	pool, err := NewPool(workers)
	if err != nil {
		t.Fatalf("Failed to create pool: %v", err)
	}
	defer pool.Shutdown()

	// Each job simply echoes the value it was handed.
	bytecode := createTestBytecode(t, "return ctx.n")

	results := make(chan int, jobs)
	for job := 0; job < jobs; job++ {
		job := job // Capture loop variable
		go func() {
			execCtx := NewContext()
			execCtx.Set("n", float64(job))

			result, err := pool.Submit(bytecode, execCtx)
			if err != nil {
				t.Errorf("Job %d failed: %v", job, err)
				results <- -1
				return
			}
			num, ok := result.(float64)
			if !ok {
				t.Errorf("Job %d: expected float64, got %T", job, result)
				results <- -1
				return
			}
			results <- int(num)
		}()
	}

	// Drain every result, discarding the -1 failure sentinel.
	seen := make(map[int]bool)
	for received := 0; received < jobs; received++ {
		if r := <-results; r != -1 {
			seen[r] = true
		}
	}

	// Every job should have contributed a unique value.
	if len(seen) != jobs {
		t.Errorf("Expected %d unique results, got %d", jobs, len(seen))
	}
}
// TestContext exercises basic Context operations: setting a value, reading it
// back, overwriting it with a different type, and looking up an absent key.
func TestContext(t *testing.T) {
	ctx := NewContext()

	ctx.Set("key", "value")
	if got := ctx.Get("key"); got != "value" {
		t.Errorf("Expected value, got %v", got)
	}

	// Overwriting with a different type replaces the stored value.
	ctx.Set("key", 123)
	if got := ctx.Get("key"); got != 123 {
		t.Errorf("Expected 123, got %v", got)
	}

	// A key that was never set reads back as nil.
	if got := ctx.Get("missing"); got != nil {
		t.Errorf("Expected nil for missing key, got %v", got)
	}
}