diff --git a/Runner/suites/Kernel/Scheduler/PELT_config/PELT_config.yaml b/Runner/suites/Kernel/Scheduler/PELT_config/PELT_config.yaml new file mode 100644 index 00000000..624c85df --- /dev/null +++ b/Runner/suites/Kernel/Scheduler/PELT_config/PELT_config.yaml @@ -0,0 +1,23 @@ +metadata: + name: pelt-config + format: "Lava-Test Test Definition 1.0" + description: "Validates kernel configuration required for PELT (Per-Entity Load Tracking) scheduler" + maintainer: + - vnarapar@qti.qualcomm.com + os: + - linux + scope: + - functional + devices: + - rb3gen2 + - qcs6490 + - qcs8300 + - qcs9100 + - sa8775p + +run: + steps: + - REPO_PATH=$PWD + - cd "$REPO_PATH/Runner/suites/Kernel/Scheduler/PELT_config" || true + - ./run.sh || true + - $REPO_PATH/Runner/utils/send-to-lava.sh PELT_config.res || true diff --git a/Runner/suites/Kernel/Scheduler/PELT_config/README.md b/Runner/suites/Kernel/Scheduler/PELT_config/README.md new file mode 100644 index 00000000..e253ab5a --- /dev/null +++ b/Runner/suites/Kernel/Scheduler/PELT_config/README.md @@ -0,0 +1,37 @@ +# PELT_config — PELT Kernel Configuration Validation + +## Overview + +Validates that the kernel is built with the configurations required for +**PELT (Per-Entity Load Tracking)**, the Linux CFS scheduler mechanism that +tracks CPU utilization per scheduling entity (tasks and task groups). 
+
+## What Is Tested
+
+| Config | Required | Purpose |
+|---|---|---|
+| `CONFIG_SMP` | Yes | Multi-CPU support — PELT load balancing is SMP-only |
+| `CONFIG_FAIR_GROUP_SCHED` | Yes | Per-entity load tracking across scheduling groups |
+| `CONFIG_SCHED_DEBUG` | Optional | Enables `/sys/kernel/debug/sched` and `/proc/<pid>/sched` |
+| `CONFIG_CFS_BANDWIDTH` | Optional | CFS bandwidth control (uses PELT util signals) |
+| `CONFIG_NO_HZ_COMMON` | Optional | Tickless kernel — affects PELT decay accuracy |
+| `CONFIG_SCHED_AUTOGROUP` | Optional | Automatic task group creation |
+| `CONFIG_CGROUP_SCHED` | Optional | cgroup-based scheduling (PELT tracks per cgroup) |
+| `CONFIG_CPU_FREQ_GOV_SCHEDUTIL` | Optional | schedutil governor — consumes PELT util_avg |
+
+## Pass / Fail / Skip Criteria
+
+- **SKIP**: `/proc/config.gz` not present (CONFIG_IKCONFIG not enabled)
+- **FAIL**: `CONFIG_SMP` or `CONFIG_FAIR_GROUP_SCHED` not enabled
+- **PASS**: All required configs enabled (optional configs logged as warnings only)
+
+## Usage
+
+```sh
+./run.sh
+```
+
+## Dependencies
+
+- `/proc/config.gz` (CONFIG_IKCONFIG + CONFIG_IKCONFIG_PROC)
+- `grep`, `zgrep` or `gzip` (provided by functestlib)
diff --git a/Runner/suites/Kernel/Scheduler/PELT_config/run.sh b/Runner/suites/Kernel/Scheduler/PELT_config/run.sh
new file mode 100755
index 00000000..68181a25
--- /dev/null
+++ b/Runner/suites/Kernel/Scheduler/PELT_config/run.sh
@@ -0,0 +1,83 @@
+#!/bin/sh
+
+# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+# SPDX-License-Identifier: BSD-3-Clause
+
+# Robustly find and source init_env
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+INIT_ENV=""
+SEARCH="$SCRIPT_DIR"
+while [ "$SEARCH" != "/" ]; do
+    if [ -f "$SEARCH/init_env" ]; then
+        INIT_ENV="$SEARCH/init_env"
+        break
+    fi
+    SEARCH=$(dirname "$SEARCH")
+done
+
+if [ -z "$INIT_ENV" ]; then
+    echo "[ERROR] Could not find init_env (starting at $SCRIPT_DIR)" >&2
+    exit 1
+fi
+
+if [ -z "$__INIT_ENV_LOADED" ]; then
+    # shellcheck disable=SC1090
+    . "$INIT_ENV"
+fi
+
+# shellcheck disable=SC1090,SC1091
+. "$TOOLS/functestlib.sh"
+
+TESTNAME="PELT_config"
+test_path=$(find_test_case_by_name "$TESTNAME")
+cd "$test_path" || exit 1
+res_file="./$TESTNAME.res"
+
+log_info "================================================================================"
+log_info "============ Starting $TESTNAME Testcase ======================================="
+log_info "================================================================================"
+log_info "Validates kernel configuration required for PELT (Per-Entity Load Tracking)"
+
+check_dependencies grep
+
+pass=true
+
+# SKIP (not FAIL) when the kernel does not expose its build configuration:
+# without /proc/config.gz there is nothing to validate.
+if [ ! -f /proc/config.gz ]; then
+    log_warn "/proc/config.gz not found — skipping kernel config checks"
+    log_warn "Enable CONFIG_IKCONFIG and CONFIG_IKCONFIG_PROC for config validation"
+    echo "$TESTNAME SKIP" > "$res_file"
+    exit 0
+fi
+
+log_info "=== Core PELT / CFS Kernel Configs ==="
+
+# /proc/config.gz is guaranteed present past the SKIP gate above, so the
+# config checks run unconditionally (no redundant re-check needed).
+CORE_CONFIGS="CONFIG_FAIR_GROUP_SCHED CONFIG_SMP"
+
+if ! check_kernel_config "$CORE_CONFIGS"; then
+    log_fail "Core PELT kernel config validation failed"
+    pass=false
+else
+    log_pass "Core PELT configs available"
+fi
+
+OPTIONAL_CONFIGS="CONFIG_SCHED_DEBUG CONFIG_CFS_BANDWIDTH CONFIG_NO_HZ_COMMON CONFIG_SCHED_AUTOGROUP CONFIG_CGROUP_SCHED CONFIG_CPU_FREQ_GOV_SCHEDUTIL"
+
+log_info "Checking optional PELT configurations..."
+for cfg in $OPTIONAL_CONFIGS; do
+    check_optional_config "$cfg"
+done
+
+if $pass; then
+    log_pass "$TESTNAME : Test Passed"
+    echo "$TESTNAME PASS" > "$res_file"
+else
+    log_fail "$TESTNAME : Test Failed"
+    echo "$TESTNAME FAIL" > "$res_file"
+fi
+
+log_info "-------------------Completed $TESTNAME Testcase----------------------------"
+exit 0
diff --git a/Runner/suites/Kernel/Scheduler/PELT_decay/PELT_decay.yaml b/Runner/suites/Kernel/Scheduler/PELT_decay/PELT_decay.yaml
new file mode 100644
index 00000000..a6187755
--- /dev/null
+++ b/Runner/suites/Kernel/Scheduler/PELT_decay/PELT_decay.yaml
@@ -0,0 +1,23 @@
+metadata:
+  name: pelt-decay
+  format: "Lava-Test Test Definition 1.0"
+  description: "Validates PELT exponential decay: util_avg must decrease after CPU load stops"
+  maintainer:
+    - vnarapar@qti.qualcomm.com
+  os:
+    - linux
+  scope:
+    - functional
+  devices:
+    - rb3gen2
+    - qcs6490
+    - qcs8300
+    - qcs9100
+    - sa8775p
+
+run:
+  steps:
+    - REPO_PATH=$PWD
+    - cd "$REPO_PATH/Runner/suites/Kernel/Scheduler/PELT_decay" || true
+    - ./run.sh || true
+    - $REPO_PATH/Runner/utils/send-to-lava.sh PELT_decay.res || true
diff --git a/Runner/suites/Kernel/Scheduler/PELT_decay/README.md b/Runner/suites/Kernel/Scheduler/PELT_decay/README.md
new file mode 100644
index 00000000..32a299de
--- /dev/null
+++ b/Runner/suites/Kernel/Scheduler/PELT_decay/README.md
@@ -0,0 +1,97 @@
+# PELT_decay — PELT Exponential Decay Validation
+
+## Overview
+
+Validates that **PELT (Per-Entity Load Tracking) exponential decay** is
PELT uses a geometric series with a +~32 ms half-life to track CPU utilization. After a task stops running, +its `util_avg` must decay toward zero — this is the mechanism that allows +the scheduler and cpufreq governors to reduce CPU frequency after load drops. + +This is the **only** testcase in the PELT suite that explicitly validates +decay. The other tests (`PELT_load_tracking`, `PELT_schedutil`) only validate +accumulation (load going up), not the decay direction. + +## PELT Decay Theory + +``` +util_avg(t) = util_avg(t0) × 0.5^( Δt / 32ms ) + +Half-life = 32 ms (one PELT period = 1024 µs) +After 100ms idle: util_avg → 11.5% of peak +After 200ms idle: util_avg → 1.3% of peak +After 1000ms idle: util_avg → ~0% of peak (< 0.001%) +``` + +## Two Validation Methods + +### Method 1: `/proc/self/sched` `se.avg.util_avg` (Primary) + +**Requires:** `CONFIG_SCHED_DEBUG` + +The test script itself performs a CPU-bound busy loop for ~3 seconds, +saturating its own PELT `util_avg`. It then reads `util_avg` immediately +after the loop (should be high), sleeps 1 second, and reads again (should +be near zero). + +``` +1. Read baseline util_avg (shell is idle → low) +2. Run arithmetic busy loop for ~3s (saturates util_avg → near 1024) +3. Read peak util_avg (should be > 100/1024) +4. Sleep 1 second (~31 PELT half-lives → >99.9% theoretical decay) +5. Read decayed util_avg (should be < peak/2) +6. Assert: decayed_util < peak_util / 2 +``` + +**Pass threshold:** `decayed_util < peak_util / 2` (50% decay after 1s). +This is extremely conservative — theoretical decay after 1s is >99.9%. + +### Method 2: schedutil Frequency Proxy (Secondary) + +**Requires:** `schedutil` cpufreq governor active + +`schedutil` translates PELT `util_avg` into CPU frequency requests. When +`util_avg` decays after load stops, `schedutil` should lower the frequency. + +``` +1. Record idle frequency +2. Spawn background busy loop for 3s → record peak frequency +3. Kill load, wait 2 seconds +4. 
Record post-decay frequency +5. Assert: post_decay_freq < load_freq +``` + +This method is **informational** — a warning is issued if frequency does +not drop, but it does not cause a FAIL (thermal floors, rate_limit_us, or +platform-specific governor behaviour can prevent immediate frequency drop). + +## Pass / Fail / Skip Criteria + +| Condition | Result | +|---|---| +| Neither method available | SKIP | +| Method 1 available, decay ≥ 50% after 1s | PASS | +| Method 1 available, decay < 50% after 1s | FAIL | +| Method 2 only, frequency dropped after load | PASS (informational) | +| Method 2 only, frequency did not drop | WARN (not FAIL) | + +## Why Existing Tests Don't Cover Decay + +| Test | What it measures | Decay? | +|---|---|---| +| `PELT_schedstat` | `rq_cpu_time` (monotonic counter, never decays) | ✗ | +| `PELT_load_tracking` | `rq_cpu_time` increases under load | ✗ | +| `PELT_schedutil` | Frequency rises under load | ✗ (rise only) | +| **`PELT_decay`** | `util_avg` decreases after load stops | **✓** | + +## Usage + +```sh +./run.sh +``` + +## Dependencies + +- `/proc/self/sched` — `CONFIG_SCHED_DEBUG` (Method 1) +- `schedutil` governor — `CONFIG_CPU_FREQ_GOV_SCHEDUTIL` (Method 2) +- `grep`, `awk`, `cat`, `date` diff --git a/Runner/suites/Kernel/Scheduler/PELT_decay/run.sh b/Runner/suites/Kernel/Scheduler/PELT_decay/run.sh new file mode 100644 index 00000000..bee55a78 --- /dev/null +++ b/Runner/suites/Kernel/Scheduler/PELT_decay/run.sh @@ -0,0 +1,307 @@ +#!/bin/sh + +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause + +# Robustly find and source init_env +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +INIT_ENV="" +SEARCH="$SCRIPT_DIR" +while [ "$SEARCH" != "/" ]; do + if [ -f "$SEARCH/init_env" ]; then + INIT_ENV="$SEARCH/init_env" + break + fi + SEARCH=$(dirname "$SEARCH") +done + +if [ -z "$INIT_ENV" ]; then + echo "[ERROR] Could not find init_env (starting at $SCRIPT_DIR)" >&2 + exit 1 +fi + +if [ -z "$__INIT_ENV_LOADED" ]; then + # shellcheck disable=SC1090 + . "$INIT_ENV" +fi + +# shellcheck disable=SC1090,SC1091 +. "$TOOLS/functestlib.sh" + +TESTNAME="PELT_decay" +test_path=$(find_test_case_by_name "$TESTNAME") +cd "$test_path" || exit 1 +res_file="./$TESTNAME.res" + +# Kill any background load task on exit +LOAD_PID="" +cleanup() { + if [ -n "$LOAD_PID" ]; then + kill "$LOAD_PID" 2>/dev/null || true + LOAD_PID="" + fi +} +trap cleanup EXIT INT TERM + +log_info "================================================================================" +log_info "============ Starting $TESTNAME Testcase =======================================" +log_info "================================================================================" +log_info "Validates PELT exponential decay: util_avg must decrease after load stops" +log_info "" +log_info "PELT decay theory:" +log_info " Half-life = ~32 ms (one PELT period = 1024 us)" +log_info " After 200ms idle: util_avg decays to ~1.3% of peak" +log_info " After 1000ms idle: util_avg decays to ~0% of peak (essentially zero)" + +check_dependencies grep awk cat date + +pass=true +method1_ran=false +method2_ran=false + +# ============================================================================ +# METHOD 1: /proc/self/sched se.avg.util_avg (requires CONFIG_SCHED_DEBUG) +# +# The shell process itself does a CPU-bound busy loop, then sleeps. +# /proc/self/sched exposes the current task's PELT util_avg. +# Reading it triggers a kernel-side PELT update at that instant. 
+#
+# Expected:
+#   peak_util (immediately after busy loop) >> baseline_util
+#   decayed_util (after 1s sleep) << peak_util
+# ============================================================================
+log_info "========================================================================"
+log_info "=== Method 1: /proc/self/sched se.avg.util_avg decay ==="
+log_info "========================================================================"
+
+# Helper: read this shell's se.avg.util_avg via /proc/$$/sched — NOT /proc/self, since grep is a forked child and its /proc/self would name the grep process, not the shell being measured
+get_self_util_avg() {
+    grep "^se\.avg\.util_avg" "/proc/$$/sched" 2>/dev/null \
+        | awk '{printf "%d\n", $NF + 0}'
+}
+
+if [ ! -f /proc/self/sched ]; then
+    log_warn "Method 1 SKIP: /proc/self/sched not present"
+    log_warn "  Enable CONFIG_SCHED_DEBUG in the kernel for per-task PELT visibility"
+else
+    method1_ran=true
+
+    # --- Baseline ---
+    baseline_util=$(get_self_util_avg)
+    log_info "Baseline util_avg: $baseline_util / 1024"
+
+    # --- CPU-bound busy loop (3 seconds) ---
+    # The shell itself does arithmetic work so its own util_avg accumulates.
+    # We check the time every 50000 iterations to avoid excessive date forks
+    # while still generating real CPU load in this process.
+    log_info "Running CPU-bound busy loop for ~3 seconds (saturating PELT)..."
+    _busy_end=$(( $(date +%s) + 3 ))
+    _busy_i=0
+    while true; do
+        _busy_i=$(( _busy_i + 1 ))
+        if [ $(( _busy_i % 50000 )) -eq 0 ]; then
+            [ "$(date +%s)" -ge "$_busy_end" ] && break
+        fi
+    done
+    log_info "Busy loop complete (iterations: $_busy_i)"
+
+    # --- Read util_avg immediately after busy loop ---
+    # Some decay may have occurred during the grep/awk fork, but util_avg
+    # should still be significantly elevated.
+ peak_util=$(get_self_util_avg) + log_info "Peak util_avg (post-busy-loop): $peak_util / 1024" + + # --- Sleep for PELT decay --- + # 1 second = ~31 PELT half-lives → theoretical decay to <0.001% of peak + # We use a conservative pass threshold of 50% decay (extremely lenient) + log_info "Sleeping 1 second for PELT decay (~31 half-lives)..." + sleep 1 + + # --- Read util_avg after decay --- + decayed_util=$(get_self_util_avg) + log_info "Decayed util_avg (post-1s-sleep): $decayed_util / 1024" + + # --- Evaluate --- + log_info "--- Method 1 Evaluation ---" + + # Check 1: peak_util must be meaningfully elevated after busy loop + # Threshold: > 100/1024 (~10% utilization). Conservative because + # fork/exec overhead during grep may have caused some decay already. + if [ "$peak_util" -gt 100 ] 2>/dev/null; then + log_pass " Peak util_avg ($peak_util) > 100 — PELT accumulated load correctly" + else + log_warn " Peak util_avg ($peak_util) is low — busy loop may not have run long enough" + log_warn " This may indicate slow shell arithmetic on this platform" + log_info " Continuing with decay check regardless..." 
+ fi + + # Check 2: decayed_util must be less than half of peak_util + # After 1s sleep, theoretical decay is >99.9%, so 50% threshold is very lenient + if [ "$peak_util" -gt 0 ] 2>/dev/null; then + # Compute threshold = peak_util / 2 (integer division via awk) + decay_threshold=$(awk "BEGIN {printf \"%d\", $peak_util / 2}") + log_info " Decay threshold (50% of peak): $decay_threshold" + + if [ "$decayed_util" -lt "$decay_threshold" ] 2>/dev/null; then + actual_pct=$(awk "BEGIN {printf \"%d\", (1 - $decayed_util / ($peak_util + 0.001)) * 100}") + log_pass " util_avg decayed from $peak_util → $decayed_util (~${actual_pct}% decay)" + log_pass " PELT exponential decay is functioning correctly" + else + log_fail " util_avg did NOT decay sufficiently after 1s sleep" + log_fail " peak=$peak_util decayed=$decayed_util threshold=<$decay_threshold" + log_fail " Expected >50% decay after 1s; PELT decay may be broken" + pass=false + fi + else + log_warn " peak_util is 0 — cannot evaluate decay ratio" + log_warn " Busy loop may not have generated measurable PELT load" + fi + + log_info " Summary: baseline=$baseline_util peak=$peak_util decayed=$decayed_util" +fi + +# ============================================================================ +# METHOD 2: schedutil frequency proxy (no CONFIG_SCHED_DEBUG required) +# +# schedutil translates PELT util_avg into CPU frequency requests. +# When util_avg decays after load stops, schedutil should lower the frequency. 
+# +# Expected: +# freq_under_load > freq_idle (schedutil raised freq due to PELT util) +# freq_post_decay < freq_under_load (schedutil lowered freq as PELT decayed) +# ============================================================================ +log_info "========================================================================" +log_info "=== Method 2: schedutil frequency proxy for PELT decay ===" +log_info "========================================================================" + +CPUFREQ_BASE="/sys/devices/system/cpu/cpufreq" + +# Find first policy using schedutil +schedutil_policy="" +for policy_dir in "$CPUFREQ_BASE"/policy*; do + [ -d "$policy_dir" ] || continue + gov_file="$policy_dir/scaling_governor" + if [ -f "$gov_file" ]; then + gov=$(cat "$gov_file" 2>/dev/null) + if [ "$gov" = "schedutil" ]; then + schedutil_policy=$(basename "$policy_dir") + break + fi + fi +done + +if [ -z "$schedutil_policy" ]; then + log_warn "Method 2 SKIP: No CPU policy using schedutil governor found" + log_warn " schedutil is required for PELT-driven frequency scaling" + log_warn " Available governors:" + for policy_dir in "$CPUFREQ_BASE"/policy*; do + [ -d "$policy_dir" ] || continue + avail_file="$policy_dir/scaling_available_governors" + [ -f "$avail_file" ] && log_info " $(basename "$policy_dir"): $(cat "$avail_file" 2>/dev/null)" + done +else + method2_ran=true + policy_dir="$CPUFREQ_BASE/$schedutil_policy" + cur_freq_file="$policy_dir/scaling_cur_freq" + max_freq_file="$policy_dir/scaling_max_freq" + + log_info "Using schedutil policy: $schedutil_policy" + + if [ ! 
-f "$cur_freq_file" ]; then + log_warn "Method 2 SKIP: scaling_cur_freq not available for $schedutil_policy" + method2_ran=false + else + max_freq=$(cat "$max_freq_file" 2>/dev/null) + + # --- Idle frequency (before load) --- + # Brief settle time to ensure we're reading a stable idle frequency + sleep 1 + idle_freq=$(cat "$cur_freq_file" 2>/dev/null) + log_info "Idle frequency: ${idle_freq} kHz (max: ${max_freq} kHz)" + + # Check if already at max — if so, decay test is not meaningful + if [ -n "$idle_freq" ] && [ -n "$max_freq" ] && \ + [ "$idle_freq" -ge "$max_freq" ] 2>/dev/null; then + log_warn "CPU already at max frequency at idle (${idle_freq} kHz)" + log_warn "Method 2 SKIP: frequency cannot increase further; decay test not meaningful" + log_warn " This may be due to performance governor override or thermal state" + method2_ran=false + else + # --- Spawn CPU-bound load --- + log_info "Spawning CPU-bound load task (3 seconds)..." + ( i=0; while true; do i=$((i + 1)); done ) & + LOAD_PID=$! + + sleep 3 + + # --- Read frequency under load --- + load_freq=$(cat "$cur_freq_file" 2>/dev/null) + log_info "Frequency under load: ${load_freq} kHz" + + # --- Kill load and wait for PELT decay --- + kill "$LOAD_PID" 2>/dev/null || true + LOAD_PID="" + log_info "Load task killed. Waiting 2 seconds for PELT decay..." 
+ sleep 2 + + # --- Read post-decay frequency --- + postdecay_freq=$(cat "$cur_freq_file" 2>/dev/null) + log_info "Post-decay frequency: ${postdecay_freq} kHz" + + # --- Evaluate --- + log_info "--- Method 2 Evaluation ---" + log_info " idle=${idle_freq} load=${load_freq} post_decay=${postdecay_freq} kHz" + + # Check 1: frequency must have risen under load + if [ -n "$load_freq" ] && [ -n "$idle_freq" ] && \ + [ "$load_freq" -gt "$idle_freq" ] 2>/dev/null; then + log_pass " Frequency rose under load: ${idle_freq} → ${load_freq} kHz" + log_pass " schedutil responded to PELT util_avg accumulation" + + # Check 2: frequency must have dropped after load stopped + if [ -n "$postdecay_freq" ] && \ + [ "$postdecay_freq" -lt "$load_freq" ] 2>/dev/null; then + log_pass " Frequency dropped after load: ${load_freq} → ${postdecay_freq} kHz" + log_pass " schedutil responded to PELT util_avg decay" + else + log_warn " Frequency did not drop after load stopped" + log_warn " load=${load_freq} post_decay=${postdecay_freq} kHz" + log_warn " Possible causes: rate_limit_us too high, thermal floor, or" + log_warn " 2s decay window insufficient for this platform's schedutil config" + log_info " This is a WARNING only — Method 1 is the authoritative decay check" + fi + else + log_warn " Frequency did not rise under load (idle=${idle_freq} load=${load_freq} kHz)" + log_warn " CPU may already be at max freq, or schedutil rate_limit_us is high" + log_info " Method 2 inconclusive — not counted as failure" + fi + fi + fi +fi + +# ============================================================================ +# Final verdict +# ============================================================================ +log_info "========================================================================" +log_info "=== Final Verdict ===" +log_info "========================================================================" + +if ! $method1_ran && ! 
$method2_ran; then + log_warn "Neither Method 1 nor Method 2 could run on this platform" + log_warn " Method 1 requires: CONFIG_SCHED_DEBUG (/proc/self/sched)" + log_warn " Method 2 requires: schedutil cpufreq governor" + log_warn "Marking as SKIP" + echo "$TESTNAME SKIP" > "$res_file" + exit 0 +fi + +if $pass; then + log_pass "$TESTNAME : Test Passed" + echo "$TESTNAME PASS" > "$res_file" +else + log_fail "$TESTNAME : Test Failed" + echo "$TESTNAME FAIL" > "$res_file" +fi + +log_info "-------------------Completed $TESTNAME Testcase----------------------------" +exit 0 diff --git a/Runner/suites/Kernel/Scheduler/PELT_load_tracking/PELT_load_tracking.yaml b/Runner/suites/Kernel/Scheduler/PELT_load_tracking/PELT_load_tracking.yaml new file mode 100644 index 00000000..85d59578 --- /dev/null +++ b/Runner/suites/Kernel/Scheduler/PELT_load_tracking/PELT_load_tracking.yaml @@ -0,0 +1,23 @@ +metadata: + name: pelt-load-tracking + format: "Lava-Test Test Definition 1.0" + description: "Functional validation that PELT tracks CPU utilization under real scheduler load" + maintainer: + - vnarapar@qti.qualcomm.com + os: + - linux + scope: + - functional + devices: + - rb3gen2 + - qcs6490 + - qcs8300 + - qcs9100 + - sa8775p + +run: + steps: + - REPO_PATH=$PWD + - cd "$REPO_PATH/Runner/suites/Kernel/Scheduler/PELT_load_tracking" || true + - ./run.sh || true + - $REPO_PATH/Runner/utils/send-to-lava.sh PELT_load_tracking.res || true diff --git a/Runner/suites/Kernel/Scheduler/PELT_load_tracking/README.md b/Runner/suites/Kernel/Scheduler/PELT_load_tracking/README.md new file mode 100644 index 00000000..64313be4 --- /dev/null +++ b/Runner/suites/Kernel/Scheduler/PELT_load_tracking/README.md @@ -0,0 +1,46 @@ +# PELT_load_tracking — PELT Functional Load Tracking Validation + +## Overview + +Functional test that verifies **PELT (Per-Entity Load Tracking) is actively +accounting CPU utilization** by spawning a CPU-bound task and measuring the +change in `/proc/schedstat` counters before 
and after the load. + +## What Is Tested + +| Check | Description | +|---|---| +| Baseline snapshot | Captures `rq_cpu_time`, `run_delay`, `pcount` from `/proc/schedstat` | +| CPU-bound load | Spawns a POSIX busy-loop task for 3 seconds | +| `rq_cpu_time` increase | Total CPU run time must increase — confirms PELT accounting | +| `pcount` increase | Scheduling event count must increase | +| Per-CPU breakdown | Logs per-CPU `rq_cpu_time` after load for triage | +| Load average | Logs `/proc/loadavg` before and after (informational) | + +## Test Methodology + +``` +1. Read baseline /proc/schedstat (sum rq_cpu_time across all CPUs) +2. Fork a POSIX busy-loop: ( i=0; while true; do i=$((i+1)); done ) & +3. Sleep 3 seconds (PELT half-life ~32 ms — 3s is ample to accumulate) +4. Read post-load /proc/schedstat +5. Kill load task +6. Assert: post_rq_cpu_time > baseline_rq_cpu_time +``` + +## Pass / Fail Criteria + +- **FAIL**: `/proc/schedstat` missing, or `rq_cpu_time` did not increase after load +- **PASS**: `rq_cpu_time` increased — PELT is tracking CPU utilization + +## Usage + +```sh +./run.sh +``` + +## Dependencies + +- `/proc/schedstat` (CONFIG_SCHEDSTATS) +- `/proc/loadavg` +- `grep`, `awk`, `cat` diff --git a/Runner/suites/Kernel/Scheduler/PELT_load_tracking/run.sh b/Runner/suites/Kernel/Scheduler/PELT_load_tracking/run.sh new file mode 100644 index 00000000..a3bdc612 --- /dev/null +++ b/Runner/suites/Kernel/Scheduler/PELT_load_tracking/run.sh @@ -0,0 +1,177 @@ +#!/bin/sh + +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause + +# Robustly find and source init_env +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +INIT_ENV="" +SEARCH="$SCRIPT_DIR" +while [ "$SEARCH" != "/" ]; do + if [ -f "$SEARCH/init_env" ]; then + INIT_ENV="$SEARCH/init_env" + break + fi + SEARCH=$(dirname "$SEARCH") +done + +if [ -z "$INIT_ENV" ]; then + echo "[ERROR] Could not find init_env (starting at $SCRIPT_DIR)" >&2 + exit 1 +fi + +if [ -z "$__INIT_ENV_LOADED" ]; then + # shellcheck disable=SC1090 + . "$INIT_ENV" +fi + +# shellcheck disable=SC1090,SC1091 +. "$TOOLS/functestlib.sh" + +TESTNAME="PELT_load_tracking" +test_path=$(find_test_case_by_name "$TESTNAME") +cd "$test_path" || exit 1 +res_file="./$TESTNAME.res" + +# Kill any background load task on exit +LOAD_PID="" +cleanup() { + if [ -n "$LOAD_PID" ]; then + kill "$LOAD_PID" 2>/dev/null || true + LOAD_PID="" + fi +} +trap cleanup EXIT INT TERM + +log_info "================================================================================" +log_info "============ Starting $TESTNAME Testcase =======================================" +log_info "================================================================================" +log_info "Functional validation: PELT tracks CPU utilization under real scheduler load" + +check_dependencies grep awk cat + +pass=true + +# --------------------------------------------------------------------------- +# Prerequisite: /proc/schedstat must be present +# --------------------------------------------------------------------------- +if [ ! 
-f /proc/schedstat ]; then + log_fail "/proc/schedstat not found — cannot perform PELT load tracking test" + log_warn "Enable CONFIG_SCHEDSTATS in the kernel" + echo "$TESTNAME FAIL" > "$res_file" + exit 1 +fi + +# Helper: sum rq_cpu_time (field 8) across all CPU lines — total ns on-CPU +get_total_rq_cpu_time() { + awk '/^cpu[0-9]/ {sum += $8} END {print sum+0}' /proc/schedstat +} + +# Helper: sum run_delay (field 9) — total ns waiting in runqueue +get_total_run_delay() { + awk '/^cpu[0-9]/ {sum += $9} END {print sum+0}' /proc/schedstat +} + +# Helper: sum pcount (field 10) — total scheduling events +get_total_pcount() { + awk '/^cpu[0-9]/ {sum += $10} END {print sum+0}' /proc/schedstat +} + +# --------------------------------------------------------------------------- +# Baseline snapshot +# --------------------------------------------------------------------------- +log_info "=== Baseline PELT Snapshot ===" + +baseline_runtime=$(get_total_rq_cpu_time) +baseline_delay=$(get_total_run_delay) +baseline_pcount=$(get_total_pcount) +baseline_load=$(awk '{print $1}' /proc/loadavg 2>/dev/null) + +log_info " rq_cpu_time : ${baseline_runtime} ns" +log_info " run_delay : ${baseline_delay} ns" +log_info " pcount : ${baseline_pcount}" +log_info " loadavg(1m) : ${baseline_load}" + +# --------------------------------------------------------------------------- +# Spawn CPU-bound load (POSIX busy loop — no bashisms) +# PELT half-life ~32 ms; 3 seconds is more than enough to accumulate load +# --------------------------------------------------------------------------- +log_info "=== Spawning CPU-bound task (3 seconds) ===" + +( i=0; while true; do i=$((i + 1)); done ) & +LOAD_PID=$! 
+log_info "Load task PID: $LOAD_PID" + +sleep 3 + +# --------------------------------------------------------------------------- +# Post-load snapshot (before killing the task) +# --------------------------------------------------------------------------- +log_info "=== Post-load PELT Snapshot ===" + +postload_runtime=$(get_total_rq_cpu_time) +postload_delay=$(get_total_run_delay) +postload_pcount=$(get_total_pcount) +postload_load=$(awk '{print $1}' /proc/loadavg 2>/dev/null) + +log_info " rq_cpu_time : ${postload_runtime} ns" +log_info " run_delay : ${postload_delay} ns" +log_info " pcount : ${postload_pcount}" +log_info " loadavg(1m) : ${postload_load}" + +# Kill load task now — before evaluating results +kill "$LOAD_PID" 2>/dev/null || true +LOAD_PID="" + +# --------------------------------------------------------------------------- +# Evaluate: rq_cpu_time must have increased +# --------------------------------------------------------------------------- +log_info "=== PELT Load Tracking Evaluation ===" + +if [ "$postload_runtime" -gt "$baseline_runtime" ] 2>/dev/null; then + delta_runtime=$((postload_runtime - baseline_runtime)) + log_pass "rq_cpu_time increased by ${delta_runtime} ns — PELT is tracking CPU utilization" +else + log_fail "rq_cpu_time did not increase (baseline=${baseline_runtime} post=${postload_runtime})" + log_fail "PELT is not accounting CPU run time correctly" + pass=false +fi + +# pcount (scheduling events) must have increased +if [ "$postload_pcount" -gt "$baseline_pcount" ] 2>/dev/null; then + delta_pcount=$((postload_pcount - baseline_pcount)) + log_pass "pcount increased by ${delta_pcount} — scheduler dispatched new tasks" +else + log_warn "pcount did not increase (baseline=${baseline_pcount} post=${postload_pcount})" +fi + +# Per-CPU breakdown after load +log_info "--- Per-CPU rq_cpu_time after load ---" +while IFS= read -r line; do + case "$line" in + cpu[0-9]*) + cpu_id=$(printf '%s\n' "$line" | awk '{print $1}') + cpu_rt=$(printf 
'%s\n' "$line" | awk '{print $8}') + log_info " $cpu_id: rq_cpu_time=${cpu_rt} ns" + ;; + esac +done < /proc/schedstat + +# --------------------------------------------------------------------------- +# Load average check (PELT-derived — may lag due to 1-min window) +# --------------------------------------------------------------------------- +log_info "=== Load Average Check ===" +log_info " Baseline loadavg(1m): $baseline_load" +log_info " Post-load loadavg(1m): $postload_load" +log_info " Note: 1-min load average has a long decay window; immediate change not guaranteed" + +if $pass; then + log_pass "$TESTNAME : Test Passed" + echo "$TESTNAME PASS" > "$res_file" +else + log_fail "$TESTNAME : Test Failed" + echo "$TESTNAME FAIL" > "$res_file" +fi + +log_info "-------------------Completed $TESTNAME Testcase----------------------------" +exit 0 diff --git a/Runner/suites/Kernel/Scheduler/PELT_sched_debug/PELT_sched_debug.yaml b/Runner/suites/Kernel/Scheduler/PELT_sched_debug/PELT_sched_debug.yaml new file mode 100644 index 00000000..c873fb0a --- /dev/null +++ b/Runner/suites/Kernel/Scheduler/PELT_sched_debug/PELT_sched_debug.yaml @@ -0,0 +1,23 @@ +metadata: + name: pelt-sched-debug + format: "Lava-Test Test Definition 1.0" + description: "Validates the scheduler debugfs interface for PELT state inspection" + maintainer: + - vnarapar@qti.qualcomm.com + os: + - linux + scope: + - functional + devices: + - rb3gen2 + - qcs6490 + - qcs8300 + - qcs9100 + - sa8775p + +run: + steps: + - REPO_PATH=$PWD + - cd "$REPO_PATH/Runner/suites/Kernel/Scheduler/PELT_sched_debug" || true + - ./run.sh || true + - $REPO_PATH/Runner/utils/send-to-lava.sh PELT_sched_debug.res || true diff --git a/Runner/suites/Kernel/Scheduler/PELT_sched_debug/README.md b/Runner/suites/Kernel/Scheduler/PELT_sched_debug/README.md new file mode 100644 index 00000000..d0fc9100 --- /dev/null +++ b/Runner/suites/Kernel/Scheduler/PELT_sched_debug/README.md @@ -0,0 +1,46 @@ +# PELT_sched_debug — Scheduler 
Debugfs Interface Validation + +## Overview + +Validates the **scheduler debugfs interface** (`/sys/kernel/debug/sched`) +which exposes PELT internal state, scheduler feature flags, scheduling domain +topology, and per-task load tracking detail. + +## What Is Tested + +| Check | Description | +|---|---| +| debugfs mount | Verifies `/sys/kernel/debug` is mounted; attempts mount if root | +| `sched/` directory | Must exist (requires CONFIG_SCHED_DEBUG) | +| `sched/features` | Scheduler feature flags logged; PELT-relevant flags checked | +| `sched/domains/` | Scheduling domain topology per CPU logged | +| Domain properties | `name`, `flags`, `min/max_interval`, `imbalance_pct` per domain | +| `/proc/self/sched` | Per-task PELT fields: `se.avg.util_avg`, `se.avg.load_avg`, etc. | + +## PELT-Relevant Scheduler Features + +| Feature | Description | +|---|---| +| `UTIL_EST` | Utilization estimation (smoothed PELT util for wakeup) | +| `NONTASK_CAPACITY` | Account non-task CPU capacity in PELT | +| `WAKEUP_PREEMPTION` | Preempt on wakeup using PELT vruntime | +| `GENTLE_FAIR_SLEEPERS` | Limit vruntime catch-up for sleepers | +| `TTWU_QUEUE` | Queue wake-up across CPUs | + +## Pass / Fail / Skip Criteria + +- **SKIP**: debugfs not mounted and not running as root +- **FAIL**: `/sys/kernel/debug/sched` missing (CONFIG_SCHED_DEBUG not enabled) +- **PASS**: Scheduler debugfs directory present and readable + +## Usage + +```sh +./run.sh +``` + +## Dependencies + +- `CONFIG_SCHED_DEBUG` kernel config +- debugfs mounted at `/sys/kernel/debug` +- `grep`, `awk`, `cat` diff --git a/Runner/suites/Kernel/Scheduler/PELT_sched_debug/run.sh b/Runner/suites/Kernel/Scheduler/PELT_sched_debug/run.sh new file mode 100644 index 00000000..906601b2 --- /dev/null +++ b/Runner/suites/Kernel/Scheduler/PELT_sched_debug/run.sh @@ -0,0 +1,190 @@ +#!/bin/sh + +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause + +# Robustly find and source init_env +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +INIT_ENV="" +SEARCH="$SCRIPT_DIR" +while [ "$SEARCH" != "/" ]; do + if [ -f "$SEARCH/init_env" ]; then + INIT_ENV="$SEARCH/init_env" + break + fi + SEARCH=$(dirname "$SEARCH") +done + +if [ -z "$INIT_ENV" ]; then + echo "[ERROR] Could not find init_env (starting at $SCRIPT_DIR)" >&2 + exit 1 +fi + +if [ -z "$__INIT_ENV_LOADED" ]; then + # shellcheck disable=SC1090 + . "$INIT_ENV" +fi + +# shellcheck disable=SC1090,SC1091 +. "$TOOLS/functestlib.sh" + +TESTNAME="PELT_sched_debug" +test_path=$(find_test_case_by_name "$TESTNAME") +cd "$test_path" || exit 1 +res_file="./$TESTNAME.res" + +log_info "================================================================================" +log_info "============ Starting $TESTNAME Testcase =======================================" +log_info "================================================================================" +log_info "Validates the scheduler debugfs interface used to inspect PELT state" + +check_dependencies grep awk cat + +pass=true + +SCHED_DEBUG_DIR="/sys/kernel/debug/sched" + +# --------------------------------------------------------------------------- +# Ensure debugfs is mounted; attempt to mount if root and not present +# --------------------------------------------------------------------------- +log_info "=== debugfs Mount Check ===" + +if [ ! -d /sys/kernel/debug ]; then + if [ "$(id -u 2>/dev/null)" = "0" ]; then + log_info "debugfs not mounted — attempting to mount..." 
+        if mount -t debugfs debugfs /sys/kernel/debug 2>/dev/null; then
+            log_pass "debugfs mounted successfully"
+        else
+            log_warn "Could not mount debugfs — scheduler debug checks will be skipped"
+            echo "$TESTNAME SKIP" > "$res_file"
+            exit 0
+        fi
+    else
+        log_warn "debugfs not mounted and not running as root — skipping"
+        echo "$TESTNAME SKIP" > "$res_file"
+        exit 0
+    fi
+else
+    log_pass "debugfs is mounted at /sys/kernel/debug"
+fi
+
+# ---------------------------------------------------------------------------
+# /sys/kernel/debug/sched directory
+# ---------------------------------------------------------------------------
+log_info "=== Scheduler Debugfs Directory ==="
+
+if [ ! -d "$SCHED_DEBUG_DIR" ]; then
+    log_fail "$SCHED_DEBUG_DIR not found"
+    log_warn "Enable CONFIG_SCHED_DEBUG in the kernel"
+    echo "$TESTNAME FAIL" > "$res_file"
+    exit 1
+fi
+
+log_pass "Scheduler debugfs present: $SCHED_DEBUG_DIR"
+
+# List all entries for reference
+log_info "Contents of $SCHED_DEBUG_DIR:"
+for entry in "$SCHED_DEBUG_DIR"/*; do
+    [ -e "$entry" ] || continue
+    entry_name=$(basename "$entry")
+    if [ -d "$entry" ]; then
+        log_info "    [dir]  $entry_name"
+    else
+        log_info "    [file] $entry_name"
+    fi
+done
+
+# ---------------------------------------------------------------------------
+# /sys/kernel/debug/sched/features — scheduler feature flags
+# ---------------------------------------------------------------------------
+log_info "=== Scheduler Feature Flags ==="
+
+if [ -f "$SCHED_DEBUG_DIR/features" ]; then
+    sched_features=$(cat "$SCHED_DEBUG_DIR/features" 2>/dev/null)
+    log_pass "sched/features readable"
+    log_info "Active features: $sched_features"
+
+    # PELT-relevant feature flags. The kernel lists a disabled flag as
+    # "NO_<FEAT>", so a plain substring grep for "<FEAT>" also matches the
+    # negated form and reports a disabled feature as active (e.g.
+    # NO_NEXT_BUDDY matched NEXT_BUDDY). Split the feature string into one
+    # token per line and require an exact whole-line match instead.
+    for feat in GENTLE_FAIR_SLEEPERS START_DEBIT NEXT_BUDDY LAST_BUDDY \
+                CACHE_HOT_BUDDY WAKEUP_PREEMPTION UTIL_EST \
+                NONTASK_CAPACITY TTWU_QUEUE; do
+        if printf '%s\n' "$sched_features" | tr ' ' '\n' | grep -qx "$feat"; then
+            log_pass "    $feat: active"
+        else
+            log_info "    $feat: not active"
+        fi
+    done
+else + log_warn "$SCHED_DEBUG_DIR/features not readable (may need CONFIG_SCHED_DEBUG)" +fi + +# --------------------------------------------------------------------------- +# /sys/kernel/debug/sched/domains — scheduling domain topology +# --------------------------------------------------------------------------- +log_info "=== Scheduling Domains ===" + +if [ -d "$SCHED_DEBUG_DIR/domains" ]; then + log_pass "sched/domains present" + total_domains=0 + + for cpu_dir in "$SCHED_DEBUG_DIR/domains"/cpu*; do + [ -d "$cpu_dir" ] || continue + cpu_name=$(basename "$cpu_dir") + + for dom_dir in "$cpu_dir"/domain*; do + [ -d "$dom_dir" ] || continue + dom_name=$(basename "$dom_dir") + total_domains=$((total_domains + 1)) + + # Log key domain properties relevant to PELT load balancing + for prop in name flags min_interval max_interval busy_factor \ + imbalance_pct cache_nice_tries; do + prop_file="$dom_dir/$prop" + if [ -f "$prop_file" ]; then + val=$(cat "$prop_file" 2>/dev/null) + log_info " $cpu_name/$dom_name/$prop = $val" + fi + done + done + done + + if [ "$total_domains" -gt 0 ]; then + log_pass "Found $total_domains scheduling domain entries" + else + log_warn "No domain entries found under sched/domains" + fi +else + log_warn "sched/domains not present (may be kernel version dependent)" +fi + +# --------------------------------------------------------------------------- +# /proc/self/sched — per-task PELT detail (requires CONFIG_SCHED_DEBUG) +# --------------------------------------------------------------------------- +log_info "=== Per-Task PELT Detail (/proc/self/sched) ===" + +if [ -f /proc/self/sched ]; then + log_pass "/proc/self/sched present" + + # Extract key PELT fields + for field in "se.load.weight" "se.avg.load_avg" "se.avg.util_avg" \ + "se.avg.runnable_avg" "nr_voluntary_switches" \ + "nr_involuntary_switches" "policy" "prio"; do + val=$(grep "^${field}" /proc/self/sched 2>/dev/null | awk '{print $NF}') + if [ -n "$val" ]; then + log_info " $field = 
$val" + fi + done +else + log_warn "/proc/self/sched not present (CONFIG_SCHED_DEBUG may be disabled)" +fi + +if $pass; then + log_pass "$TESTNAME : Test Passed" + echo "$TESTNAME PASS" > "$res_file" +else + log_fail "$TESTNAME : Test Failed" + echo "$TESTNAME FAIL" > "$res_file" +fi + +log_info "-------------------Completed $TESTNAME Testcase----------------------------" +exit 0 diff --git a/Runner/suites/Kernel/Scheduler/PELT_schedstat/PELT_schedstat.yaml b/Runner/suites/Kernel/Scheduler/PELT_schedstat/PELT_schedstat.yaml new file mode 100644 index 00000000..8f9063ed --- /dev/null +++ b/Runner/suites/Kernel/Scheduler/PELT_schedstat/PELT_schedstat.yaml @@ -0,0 +1,23 @@ +metadata: + name: pelt-schedstat + format: "Lava-Test Test Definition 1.0" + description: "Validates /proc/schedstat PELT per-CPU runtime accounting interface" + maintainer: + - vnarapar@qti.qualcomm.com + os: + - linux + scope: + - functional + devices: + - rb3gen2 + - qcs6490 + - qcs8300 + - qcs9100 + - sa8775p + +run: + steps: + - REPO_PATH=$PWD + - cd "$REPO_PATH/Runner/suites/Kernel/Scheduler/PELT_schedstat" || true + - ./run.sh || true + - $REPO_PATH/Runner/utils/send-to-lava.sh PELT_schedstat.res || true diff --git a/Runner/suites/Kernel/Scheduler/PELT_schedstat/README.md b/Runner/suites/Kernel/Scheduler/PELT_schedstat/README.md new file mode 100644 index 00000000..0536b3b6 --- /dev/null +++ b/Runner/suites/Kernel/Scheduler/PELT_schedstat/README.md @@ -0,0 +1,47 @@ +# PELT_schedstat — PELT /proc/schedstat Interface Validation + +## Overview + +Validates `/proc/schedstat`, the kernel interface that exposes +**per-CPU PELT runtime accounting** data. This includes CPU run time, +run-queue wait delay, and scheduling event counts per CPU. 
+ +## What Is Tested + +| Check | Description | +|---|---| +| `/proc/schedstat` presence | File must exist (requires CONFIG_SCHEDSTATS) | +| Version field | Schedstat format version is logged | +| Per-CPU line format | Each `cpuN` line must have ≥ 10 fields | +| `rq_cpu_time` non-zero | Total CPU run time must be > 0 (PELT accounting active) | +| Scheduling domain lines | `domain*` lines logged (informational) | +| `/proc/self/schedstat` | Per-task exec time, wait time, timeslice count | +| `/proc/loadavg` | PELT-derived 1/5/15-min load averages | + +## /proc/schedstat Field Reference (v15) + +``` +cpuN yld_count yld_act_count sched_count sched_goidle + ttwu_count ttwu_local rq_cpu_time run_delay pcount +``` + +| Field | Description | +|---|---| +| `rq_cpu_time` (field 8) | Total ns tasks spent running on this CPU | +| `run_delay` (field 9) | Total ns tasks spent waiting in runqueue | +| `pcount` (field 10) | Number of tasks that have run on this CPU | + +## Pass / Fail Criteria + +- **FAIL**: `/proc/schedstat` missing, no `cpu*` lines, or `rq_cpu_time` is zero +- **PASS**: All CPU lines valid and `rq_cpu_time` > 0 + +## Usage + +```sh +./run.sh +``` + +## Dependencies + +- `grep`, `awk`, `cat` diff --git a/Runner/suites/Kernel/Scheduler/PELT_schedstat/run.sh b/Runner/suites/Kernel/Scheduler/PELT_schedstat/run.sh new file mode 100644 index 00000000..701882ea --- /dev/null +++ b/Runner/suites/Kernel/Scheduler/PELT_schedstat/run.sh @@ -0,0 +1,171 @@ +#!/bin/sh + +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause + +# Robustly find and source init_env +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +INIT_ENV="" +SEARCH="$SCRIPT_DIR" +while [ "$SEARCH" != "/" ]; do + if [ -f "$SEARCH/init_env" ]; then + INIT_ENV="$SEARCH/init_env" + break + fi + SEARCH=$(dirname "$SEARCH") +done + +if [ -z "$INIT_ENV" ]; then + echo "[ERROR] Could not find init_env (starting at $SCRIPT_DIR)" >&2 + exit 1 +fi + +if [ -z "$__INIT_ENV_LOADED" ]; then + # shellcheck disable=SC1090 + . "$INIT_ENV" +fi + +# shellcheck disable=SC1090,SC1091 +. "$TOOLS/functestlib.sh" + +TESTNAME="PELT_schedstat" +test_path=$(find_test_case_by_name "$TESTNAME") +cd "$test_path" || exit 1 +res_file="./$TESTNAME.res" + +log_info "================================================================================" +log_info "============ Starting $TESTNAME Testcase =======================================" +log_info "================================================================================" +log_info "Validates /proc/schedstat — the PELT per-CPU runtime accounting interface" + +check_dependencies grep awk cat + +pass=true + +# --------------------------------------------------------------------------- +# /proc/schedstat — existence and version +# --------------------------------------------------------------------------- +log_info "=== /proc/schedstat Interface ===" + +if [ ! 
-f /proc/schedstat ]; then + log_fail "/proc/schedstat not found" + log_warn "Enable CONFIG_SCHEDSTATS in the kernel to expose PELT accounting" + echo "$TESTNAME FAIL" > "$res_file" + exit 1 +fi + +schedstat_ver=$(awk '/^version/ {print $2}' /proc/schedstat) +log_pass "/proc/schedstat present (version $schedstat_ver)" + +# --------------------------------------------------------------------------- +# Per-CPU line format validation +# schedstat v15 cpu line: +# cpu yld_count yld_act_count sched_count sched_goidle +# ttwu_count ttwu_local rq_cpu_time run_delay pcount +# Fields: 10 total (including the "cpuN" label) +# rq_cpu_time = field 8, run_delay = field 9, pcount = field 10 +# --------------------------------------------------------------------------- +log_info "=== Per-CPU Line Validation ===" + +cpu_count=0 +bad_lines=0 + +while IFS= read -r line; do + case "$line" in + cpu[0-9]*) + cpu_count=$((cpu_count + 1)) + field_count=$(printf '%s\n' "$line" | awk '{print NF}') + if [ "$field_count" -lt 10 ]; then + log_warn " Unexpected field count ($field_count) on: $line" + bad_lines=$((bad_lines + 1)) + else + cpu_id=$(printf '%s\n' "$line" | awk '{print $1}') + rq_cpu_time=$(printf '%s\n' "$line" | awk '{print $8}') + run_delay=$(printf '%s\n' "$line" | awk '{print $9}') + pcount=$(printf '%s\n' "$line" | awk '{print $10}') + log_info " $cpu_id: rq_cpu_time=${rq_cpu_time}ns run_delay=${run_delay}ns pcount=$pcount" + fi + ;; + esac +done < /proc/schedstat + +if [ "$cpu_count" -eq 0 ]; then + log_fail "No cpu* lines found in /proc/schedstat" + pass=false +elif [ "$bad_lines" -gt 0 ]; then + log_fail "$bad_lines cpu line(s) had unexpected format" + pass=false +else + log_pass "All $cpu_count CPU lines have valid format" +fi + +# --------------------------------------------------------------------------- +# Aggregate totals +# --------------------------------------------------------------------------- +log_info "=== Aggregate PELT Accounting ===" + +total_runtime=$(awk 
'/^cpu[0-9]/ {sum += $8} END {print sum+0}' /proc/schedstat)
+total_delay=$(awk '/^cpu[0-9]/ {sum += $9} END {print sum+0}' /proc/schedstat)
+total_pcount=$(awk '/^cpu[0-9]/ {sum += $10} END {print sum+0}' /proc/schedstat)
+
+log_info "    Total rq_cpu_time : ${total_runtime} ns"
+log_info "    Total run_delay   : ${total_delay} ns"
+log_info "    Total pcount      : ${total_pcount}"
+
+# rq_cpu_time accumulates only while tasks actually run on a CPU; a zero
+# total means the scheduler is not accounting CPU time at all.
+if [ "$total_runtime" -gt 0 ] 2>/dev/null; then
+    log_pass "rq_cpu_time is non-zero — PELT CPU accounting is active"
+else
+    log_fail "rq_cpu_time is zero — scheduler may not be accounting CPU time"
+    pass=false
+fi
+
+# ---------------------------------------------------------------------------
+# Scheduling domain lines (present in schedstat alongside cpu lines)
+# ---------------------------------------------------------------------------
+log_info "=== Scheduling Domain Lines ==="
+
+# NOTE: grep -c already prints "0" on no match (while exiting non-zero), so
+# the previous "|| echo 0" fallback produced the two-line value "0<LF>0" and
+# broke the numeric [ -gt ] test below. Only substitute 0 when grep produced
+# no output at all (e.g. /proc/schedstat unreadable).
+domain_count=$(grep -c "^domain" /proc/schedstat 2>/dev/null)
+[ -n "$domain_count" ] || domain_count=0
+if [ "$domain_count" -gt 0 ]; then
+    log_pass "Found $domain_count scheduling domain line(s) in /proc/schedstat"
+else
+    log_warn "No domain lines found in /proc/schedstat (may be normal on some kernels)"
+fi
+
+# ---------------------------------------------------------------------------
+# /proc/self/schedstat — per-task PELT stats
+# ---------------------------------------------------------------------------
+log_info "=== Per-Task schedstat (/proc/self/schedstat) ==="
+
+if [ -f /proc/self/schedstat ]; then
+    self_stat=$(cat /proc/self/schedstat)
+    exec_ns=$(printf '%s\n' "$self_stat" | awk '{print $1}')
+    wait_ns=$(printf '%s\n' "$self_stat" | awk '{print $2}')
+    timeslices=$(printf '%s\n' "$self_stat" | awk '{print $3}')
+    log_pass "/proc/self/schedstat: exec_ns=$exec_ns wait_ns=$wait_ns timeslices=$timeslices"
+else
+    log_warn "/proc/self/schedstat not found (CONFIG_SCHEDSTATS may be disabled)"
+fi
+
+# ---------------------------------------------------------------------------
+# /proc/loadavg — PELT-derived system load
+# 
--------------------------------------------------------------------------- +log_info "=== /proc/loadavg (PELT-derived load averages) ===" + +if [ -f /proc/loadavg ]; then + loadavg=$(cat /proc/loadavg) + log_pass "/proc/loadavg: $loadavg" +else + log_fail "/proc/loadavg not found" + pass=false +fi + +if $pass; then + log_pass "$TESTNAME : Test Passed" + echo "$TESTNAME PASS" > "$res_file" +else + log_fail "$TESTNAME : Test Failed" + echo "$TESTNAME FAIL" > "$res_file" +fi + +log_info "-------------------Completed $TESTNAME Testcase----------------------------" +exit 0 diff --git a/Runner/suites/Kernel/Scheduler/PELT_schedutil/PELT_schedutil.yaml b/Runner/suites/Kernel/Scheduler/PELT_schedutil/PELT_schedutil.yaml new file mode 100644 index 00000000..509d4bc9 --- /dev/null +++ b/Runner/suites/Kernel/Scheduler/PELT_schedutil/PELT_schedutil.yaml @@ -0,0 +1,23 @@ +metadata: + name: pelt-schedutil + format: "Lava-Test Test Definition 1.0" + description: "Validates schedutil cpufreq governor integration with PELT utilization signals" + maintainer: + - vnarapar@qti.qualcomm.com + os: + - linux + scope: + - functional + devices: + - rb3gen2 + - qcs6490 + - qcs8300 + - qcs9100 + - sa8775p + +run: + steps: + - REPO_PATH=$PWD + - cd "$REPO_PATH/Runner/suites/Kernel/Scheduler/PELT_schedutil" || true + - ./run.sh || true + - $REPO_PATH/Runner/utils/send-to-lava.sh PELT_schedutil.res || true diff --git a/Runner/suites/Kernel/Scheduler/PELT_schedutil/README.md b/Runner/suites/Kernel/Scheduler/PELT_schedutil/README.md new file mode 100644 index 00000000..8e796f87 --- /dev/null +++ b/Runner/suites/Kernel/Scheduler/PELT_schedutil/README.md @@ -0,0 +1,55 @@ +# PELT_schedutil — Schedutil Governor PELT Integration Validation + +## Overview + +Validates the **schedutil cpufreq governor** integration with PELT utilization +signals. 
Schedutil is the primary governor that translates PELT's per-CPU +`util_avg` into CPU frequency requests, making it the key interface between +the scheduler and the power management subsystem. + +## What Is Tested + +| Check | Description | +|---|---| +| Governor detection | Identifies all CPU policies using `schedutil` | +| Frequency range | `scaling_min_freq` ≤ `scaling_max_freq` per policy | +| `rate_limit_us` | Schedutil-specific update rate limit (informational) | +| Available frequencies | Logged per policy | +| Frequency response | CPU frequency increases under CPU-bound load | +| Per-CPU util | `/sys/devices/system/cpu/cpuN/cpufreq/util` (if present) | + +## Test Methodology + +``` +1. Scan /sys/devices/system/cpu/cpufreq/policy*/scaling_governor +2. For each schedutil policy: validate freq range, log rate_limit_us +3. Pick first schedutil policy +4. Record idle frequency +5. Spawn POSIX busy-loop for 3 seconds +6. Record under-load frequency +7. Assert: load_freq >= idle_freq (schedutil raised freq in response to PELT util) +``` + +## Pass / Fail / Skip Criteria + +- **SKIP**: No CPU policies using schedutil governor found +- **FAIL**: Frequency range invalid (`max < min`) +- **PASS**: All schedutil policies valid; frequency responded to load (or already at max) + +## Note on Frequency Response + +If the CPU is already at maximum frequency at idle (e.g., performance mode +or thermal headroom), the frequency-under-load check will show no change. +This is logged as a **warning**, not a failure. 
+ +## Usage + +```sh +./run.sh +``` + +## Dependencies + +- `/sys/devices/system/cpu/cpufreq/` (CONFIG_CPU_FREQ) +- `schedutil` governor (CONFIG_CPU_FREQ_GOV_SCHEDUTIL) +- `grep`, `awk`, `cat` diff --git a/Runner/suites/Kernel/Scheduler/PELT_schedutil/run.sh b/Runner/suites/Kernel/Scheduler/PELT_schedutil/run.sh new file mode 100644 index 00000000..dc615339 --- /dev/null +++ b/Runner/suites/Kernel/Scheduler/PELT_schedutil/run.sh @@ -0,0 +1,226 @@ +#!/bin/sh + +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause + +# Robustly find and source init_env +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +INIT_ENV="" +SEARCH="$SCRIPT_DIR" +while [ "$SEARCH" != "/" ]; do + if [ -f "$SEARCH/init_env" ]; then + INIT_ENV="$SEARCH/init_env" + break + fi + SEARCH=$(dirname "$SEARCH") +done + +if [ -z "$INIT_ENV" ]; then + echo "[ERROR] Could not find init_env (starting at $SCRIPT_DIR)" >&2 + exit 1 +fi + +if [ -z "$__INIT_ENV_LOADED" ]; then + # shellcheck disable=SC1090 + . "$INIT_ENV" +fi + +# shellcheck disable=SC1090,SC1091 +. 
"$TOOLS/functestlib.sh" + +TESTNAME="PELT_schedutil" +test_path=$(find_test_case_by_name "$TESTNAME") +cd "$test_path" || exit 1 +res_file="./$TESTNAME.res" + +# Kill any background load task on exit +LOAD_PID="" +cleanup() { + if [ -n "$LOAD_PID" ]; then + kill "$LOAD_PID" 2>/dev/null || true + LOAD_PID="" + fi +} +trap cleanup EXIT INT TERM + +log_info "================================================================================" +log_info "============ Starting $TESTNAME Testcase =======================================" +log_info "================================================================================" +log_info "Validates schedutil cpufreq governor integration with PELT utilization signals" + +check_dependencies grep awk cat + +pass=true +schedutil_cpus="" + +CPUFREQ_BASE="/sys/devices/system/cpu/cpufreq" +CPU_BASE="/sys/devices/system/cpu" + +# --------------------------------------------------------------------------- +# Discover CPUs using schedutil governor +# --------------------------------------------------------------------------- +log_info "=== Schedutil Governor Detection ===" + +for policy_dir in "$CPUFREQ_BASE"/policy*; do + [ -d "$policy_dir" ] || continue + policy_name=$(basename "$policy_dir") + gov_file="$policy_dir/scaling_governor" + + if [ -f "$gov_file" ]; then + gov=$(cat "$gov_file" 2>/dev/null) + if [ "$gov" = "schedutil" ]; then + log_pass " $policy_name: governor = schedutil" + schedutil_cpus="$schedutil_cpus $policy_name" + else + log_info " $policy_name: governor = $gov (not schedutil)" + fi + fi +done + +if [ -z "$schedutil_cpus" ]; then + log_warn "No CPU policies using schedutil governor found" + log_warn "schedutil is required for PELT-driven frequency scaling" + log_warn "Check available governors:" + for policy_dir in "$CPUFREQ_BASE"/policy*; do + [ -d "$policy_dir" ] || continue + avail_file="$policy_dir/scaling_available_governors" + if [ -f "$avail_file" ]; then + avail=$(cat "$avail_file" 2>/dev/null) + log_info 
" $(basename "$policy_dir"): available = $avail" + fi + done + echo "$TESTNAME SKIP" > "$res_file" + exit 0 +fi + +# --------------------------------------------------------------------------- +# For each schedutil policy: validate frequency range and rate_limit_us +# --------------------------------------------------------------------------- +log_info "=== Schedutil Policy Validation ===" + +for policy_name in $schedutil_cpus; do + policy_dir="$CPUFREQ_BASE/$policy_name" + log_info "--- $policy_name ---" + + # Frequency range + min_freq_file="$policy_dir/scaling_min_freq" + max_freq_file="$policy_dir/scaling_max_freq" + cur_freq_file="$policy_dir/scaling_cur_freq" + + if [ -f "$min_freq_file" ] && [ -f "$max_freq_file" ]; then + min_freq=$(cat "$min_freq_file" 2>/dev/null) + max_freq=$(cat "$max_freq_file" 2>/dev/null) + log_pass " freq range: ${min_freq} kHz — ${max_freq} kHz" + + if [ -n "$min_freq" ] && [ -n "$max_freq" ] && \ + [ "$max_freq" -ge "$min_freq" ] 2>/dev/null; then + log_pass " freq range valid (max >= min)" + else + log_fail " freq range invalid: max ($max_freq) < min ($min_freq)" + pass=false + fi + fi + + if [ -f "$cur_freq_file" ]; then + cur_freq=$(cat "$cur_freq_file" 2>/dev/null) + log_info " current freq: ${cur_freq} kHz" + fi + + # rate_limit_us — schedutil-specific tunable + rate_limit_file="$policy_dir/schedutil/rate_limit_us" + if [ -f "$rate_limit_file" ]; then + rate_limit=$(cat "$rate_limit_file" 2>/dev/null) + log_pass " rate_limit_us = $rate_limit us" + else + log_info " rate_limit_us not present (kernel version dependent)" + fi + + # Available frequencies + avail_freq_file="$policy_dir/scaling_available_frequencies" + if [ -f "$avail_freq_file" ]; then + avail_freqs=$(cat "$avail_freq_file" 2>/dev/null) + freq_count=$(printf '%s\n' "$avail_freqs" | wc -w) + log_info " available frequencies ($freq_count): $avail_freqs" + fi + + # Related CPUs + related_file="$policy_dir/related_cpus" + if [ -f "$related_file" ]; then + 
related=$(cat "$related_file" 2>/dev/null)
+        log_info "    related CPUs: $related"
+    fi
+done
+
+# ---------------------------------------------------------------------------
+# Functional: verify frequency responds to CPU load under schedutil
+# ---------------------------------------------------------------------------
+log_info "=== Schedutil Frequency Response Under Load ==="
+
+# Pick first schedutil policy for load test
+first_policy=$(printf '%s\n' "$schedutil_cpus" | awk '{print $1}')
+policy_dir="$CPUFREQ_BASE/$first_policy"
+cur_freq_file="$policy_dir/scaling_cur_freq"
+
+if [ -f "$cur_freq_file" ]; then
+    idle_freq=$(cat "$cur_freq_file" 2>/dev/null)
+    log_info "    Idle frequency ($first_policy): ${idle_freq} kHz"
+
+    # Spawn CPU-bound load.
+    # NOTE(review): the busy loop is not pinned to the CPUs of $first_policy
+    # (no taskset dependency); on asymmetric systems the scheduler may place
+    # it on another policy's CPUs — confirm this is acceptable.
+    log_info "    Spawning CPU-bound task (3 seconds)..."
+    ( i=0; while true; do i=$((i + 1)); done ) &
+    LOAD_PID=$!
+
+    # Sample after 3 s so the load duration matches the logged message and
+    # the README (previously "sleep 2", contradicting both).
+    sleep 3
+
+    load_freq=$(cat "$cur_freq_file" 2>/dev/null)
+    log_info "    Under-load frequency ($first_policy): ${load_freq} kHz"
+
+    kill "$LOAD_PID" 2>/dev/null || true
+    LOAD_PID=""
+
+    # Check for a strict increase first: the previous "-ge" test also matched
+    # the unchanged case, falsely logging "Frequency increased under load"
+    # and making the equality branch unreachable.
+    if [ -n "$idle_freq" ] && [ -n "$load_freq" ] && \
+       [ "$load_freq" -gt "$idle_freq" ] 2>/dev/null; then
+        log_pass "    Frequency increased under load: ${idle_freq} -> ${load_freq} kHz"
+        log_pass "    schedutil is responding to PELT utilization signal"
+    elif [ -n "$load_freq" ] && [ "$load_freq" = "$idle_freq" ]; then
+        log_warn "    Frequency unchanged under load (${idle_freq} kHz)"
+        log_warn "    May be at max freq already, or schedutil rate_limit_us is high"
+        log_info "    This is a warning only — not a failure"
+    else
+        log_warn "    Could not compare frequencies (idle=${idle_freq} load=${load_freq})"
+    fi
+else
+    log_warn "    scaling_cur_freq not available for $first_policy — skipping load test"
+fi
+
+# ---------------------------------------------------------------------------
+# /sys/devices/system/cpu/cpu*/cpufreq/util — PELT util per-CPU (if present)
+# ---------------------------------------------------------------------------
+log_info "=== Per-CPU 
PELT Utilization (cpufreq util) ===" + +util_found=false +for cpu_dir in "$CPU_BASE"/cpu[0-9]*; do + [ -d "$cpu_dir" ] || continue + util_file="$cpu_dir/cpufreq/util" + if [ -f "$util_file" ]; then + util_val=$(cat "$util_file" 2>/dev/null) + cpu_name=$(basename "$cpu_dir") + log_info " $cpu_name util = $util_val" + util_found=true + fi +done + +if ! $util_found; then + log_info " Per-CPU cpufreq/util not present (kernel version dependent)" +fi + +if $pass; then + log_pass "$TESTNAME : Test Passed" + echo "$TESTNAME PASS" > "$res_file" +else + log_fail "$TESTNAME : Test Failed" + echo "$TESTNAME FAIL" > "$res_file" +fi + +log_info "-------------------Completed $TESTNAME Testcase----------------------------" +exit 0 diff --git a/Runner/suites/Kernel/Scheduler/PELT_tunables/PELT_tunables.yaml b/Runner/suites/Kernel/Scheduler/PELT_tunables/PELT_tunables.yaml new file mode 100644 index 00000000..ee27ff2d --- /dev/null +++ b/Runner/suites/Kernel/Scheduler/PELT_tunables/PELT_tunables.yaml @@ -0,0 +1,23 @@ +metadata: + name: pelt-tunables + format: "Lava-Test Test Definition 1.0" + description: "Validates CFS/PELT scheduler tunables in /proc/sys/kernel/" + maintainer: + - vnarapar@qti.qualcomm.com + os: + - linux + scope: + - functional + devices: + - rb3gen2 + - qcs6490 + - qcs8300 + - qcs9100 + - sa8775p + +run: + steps: + - REPO_PATH=$PWD + - cd "$REPO_PATH/Runner/suites/Kernel/Scheduler/PELT_tunables" || true + - ./run.sh || true + - $REPO_PATH/Runner/utils/send-to-lava.sh PELT_tunables.res || true diff --git a/Runner/suites/Kernel/Scheduler/PELT_tunables/README.md b/Runner/suites/Kernel/Scheduler/PELT_tunables/README.md new file mode 100644 index 00000000..e971d682 --- /dev/null +++ b/Runner/suites/Kernel/Scheduler/PELT_tunables/README.md @@ -0,0 +1,56 @@ +# PELT_tunables — CFS/PELT Scheduler Tunables Validation + +## Overview + +Validates the **CFS and PELT scheduler tunables** exposed via +`/proc/sys/kernel/sched_*`. 
These tunables control scheduling latency, +task migration cost, and utilization clamping (uclamp) which directly +influence PELT behaviour. + +## What Is Tested + +| Tunable | Path | Check | +|---|---|---| +| `sched_latency_ns` | `/proc/sys/kernel/` | Present and > 0 | +| `sched_min_granularity_ns` | `/proc/sys/kernel/` | Present and > 0 | +| `sched_wakeup_granularity_ns` | `/proc/sys/kernel/` | Present and > 0 | +| CFS invariant | — | `latency_ns >= min_granularity_ns` | +| `sched_migration_cost_ns` | `/proc/sys/kernel/` | Present (informational) | +| `sched_util_clamp_min` | `/proc/sys/kernel/` | Present if CONFIG_UCLAMP_TASK | +| `sched_util_clamp_max` | `/proc/sys/kernel/` | Present if CONFIG_UCLAMP_TASK | +| uclamp invariant | — | `clamp_min <= clamp_max` | +| Per-domain tunables | `/proc/sys/kernel/sched_domain/` | Logged (informational) | + +## CFS Timing Invariant + +The kernel enforces: + +``` +sched_latency_ns >= sched_min_granularity_ns +``` + +If violated, the CFS scheduler may behave incorrectly. This test +explicitly validates this invariant. + +## uclamp (Utilization Clamp) + +Available on kernels ≥ 5.3 with `CONFIG_UCLAMP_TASK`. Controls the +min/max PELT utilization signal used by the Energy-Aware Scheduler (EAS) +and cpufreq governors. + +## Pass / Fail Criteria + +- **FAIL**: Core CFS timing tunables missing or zero, or CFS/uclamp invariant violated +- **PASS**: All required tunables present, non-zero, and invariants hold + +## Usage + +```sh +./run.sh +``` + +## Dependencies + +- `/proc/sys/kernel/sched_*` +- `CONFIG_UCLAMP_TASK` (optional, for uclamp checks) +- `grep`, `awk`, `cat` diff --git a/Runner/suites/Kernel/Scheduler/PELT_tunables/run.sh b/Runner/suites/Kernel/Scheduler/PELT_tunables/run.sh new file mode 100644 index 00000000..aeea3197 --- /dev/null +++ b/Runner/suites/Kernel/Scheduler/PELT_tunables/run.sh @@ -0,0 +1,181 @@ +#!/bin/sh + +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause + +# Robustly find and source init_env +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +INIT_ENV="" +SEARCH="$SCRIPT_DIR" +while [ "$SEARCH" != "/" ]; do + if [ -f "$SEARCH/init_env" ]; then + INIT_ENV="$SEARCH/init_env" + break + fi + SEARCH=$(dirname "$SEARCH") +done + +if [ -z "$INIT_ENV" ]; then + echo "[ERROR] Could not find init_env (starting at $SCRIPT_DIR)" >&2 + exit 1 +fi + +if [ -z "$__INIT_ENV_LOADED" ]; then + # shellcheck disable=SC1090 + . "$INIT_ENV" +fi + +# shellcheck disable=SC1090,SC1091 +. "$TOOLS/functestlib.sh" + +TESTNAME="PELT_tunables" +test_path=$(find_test_case_by_name "$TESTNAME") +cd "$test_path" || exit 1 +res_file="./$TESTNAME.res" + +log_info "================================================================================" +log_info "============ Starting $TESTNAME Testcase =======================================" +log_info "================================================================================" +log_info "Validates CFS/PELT scheduler tunables in /proc/sys/kernel/" + +check_dependencies grep awk cat + +pass=true + +SYSCTL_DIR="/proc/sys/kernel" + +# --------------------------------------------------------------------------- +# Core CFS timing tunables — must be present and non-zero +# --------------------------------------------------------------------------- +log_info "=== Core CFS Timing Tunables ===" + +for tunable in sched_min_granularity_ns sched_latency_ns sched_wakeup_granularity_ns; do + path="$SYSCTL_DIR/$tunable" + if [ -f "$path" ]; then + val=$(cat "$path" 2>/dev/null) + if [ -n "$val" ] && [ "$val" -gt 0 ] 2>/dev/null; then + log_pass " $tunable = $val ns" + else + log_fail " $tunable is zero or unreadable (val='$val')" + pass=false + fi + else + log_fail " $tunable not found at $path" + pass=false + fi +done + +# Validate ordering: latency >= min_granularity (CFS invariant) +lat_path="$SYSCTL_DIR/sched_latency_ns" +gran_path="$SYSCTL_DIR/sched_min_granularity_ns" +if [ 
-f "$lat_path" ] && [ -f "$gran_path" ]; then + lat=$(cat "$lat_path" 2>/dev/null) + gran=$(cat "$gran_path" 2>/dev/null) + if [ -n "$lat" ] && [ -n "$gran" ] && [ "$lat" -ge "$gran" ] 2>/dev/null; then + log_pass " sched_latency_ns ($lat) >= sched_min_granularity_ns ($gran) — CFS invariant holds" + else + log_fail " CFS invariant violated: sched_latency_ns ($lat) < sched_min_granularity_ns ($gran)" + pass=false + fi +fi + +# --------------------------------------------------------------------------- +# Migration cost tunable +# --------------------------------------------------------------------------- +log_info "=== Migration Cost Tunable ===" + +mig_path="$SYSCTL_DIR/sched_migration_cost_ns" +if [ -f "$mig_path" ]; then + val=$(cat "$mig_path" 2>/dev/null) + log_pass " sched_migration_cost_ns = $val ns" +else + log_warn " sched_migration_cost_ns not found (optional)" +fi + +# --------------------------------------------------------------------------- +# PELT util clamp tunables (kernel >= 5.3, uclamp) +# --------------------------------------------------------------------------- +log_info "=== PELT Utilization Clamp Tunables (uclamp) ===" + +uclamp_found=false +for tunable in sched_util_clamp_min sched_util_clamp_max; do + path="$SYSCTL_DIR/$tunable" + if [ -f "$path" ]; then + val=$(cat "$path" 2>/dev/null) + log_pass " $tunable = $val" + uclamp_found=true + else + log_info " $tunable not present (requires CONFIG_UCLAMP_TASK, kernel >= 5.3)" + fi +done + +if $uclamp_found; then + # Validate: clamp_min <= clamp_max + min_path="$SYSCTL_DIR/sched_util_clamp_min" + max_path="$SYSCTL_DIR/sched_util_clamp_max" + if [ -f "$min_path" ] && [ -f "$max_path" ]; then + umin=$(cat "$min_path" 2>/dev/null) + umax=$(cat "$max_path" 2>/dev/null) + if [ -n "$umin" ] && [ -n "$umax" ] && [ "$umin" -le "$umax" ] 2>/dev/null; then + log_pass " uclamp invariant: clamp_min ($umin) <= clamp_max ($umax)" + else + log_fail " uclamp invariant violated: clamp_min ($umin) > clamp_max 
($umax)" + pass=false + fi + fi +fi + +# --------------------------------------------------------------------------- +# Additional scheduler tunables (informational) +# --------------------------------------------------------------------------- +log_info "=== Additional Scheduler Tunables (informational) ===" + +for tunable in sched_nr_migrate sched_schedstats sched_child_runs_first \ + sched_autogroup_enabled; do + path="$SYSCTL_DIR/$tunable" + if [ -f "$path" ]; then + val=$(cat "$path" 2>/dev/null) + log_info " $tunable = $val" + else + log_info " $tunable: not present" + fi +done + +# --------------------------------------------------------------------------- +# /proc/sys/kernel/sched_domain — per-domain tunables (if present) +# --------------------------------------------------------------------------- +log_info "=== Per-CPU Sched Domain Tunables ===" + +sched_domain_base="/proc/sys/kernel/sched_domain" +if [ -d "$sched_domain_base" ]; then + log_info "sched_domain sysctl present: $sched_domain_base" + for cpu_dir in "$sched_domain_base"/cpu*; do + [ -d "$cpu_dir" ] || continue + cpu_name=$(basename "$cpu_dir") + for dom_dir in "$cpu_dir"/domain*; do + [ -d "$dom_dir" ] || continue + dom_name=$(basename "$dom_dir") + for prop in busy_factor imbalance_pct cache_nice_tries \ + min_interval max_interval; do + prop_file="$dom_dir/$prop" + if [ -f "$prop_file" ]; then + val=$(cat "$prop_file" 2>/dev/null) + log_info " $cpu_name/$dom_name/$prop = $val" + fi + done + done + done +else + log_info "sched_domain sysctl not present (kernel version dependent)" +fi + +if $pass; then + log_pass "$TESTNAME : Test Passed" + echo "$TESTNAME PASS" > "$res_file" +else + log_fail "$TESTNAME : Test Failed" + echo "$TESTNAME FAIL" > "$res_file" +fi + +log_info "-------------------Completed $TESTNAME Testcase----------------------------" +exit 0