#!/bin/bash
set -uo pipefail

echo "=== End-to-End Cold Start Benchmark ==="
echo "Date: $(date -u)"
echo "GPU: $(nvidia-smi --query-gpu=name,memory.total --format=csv,noheader 2>/dev/null || echo 'none')"
df -h /tmp | tail -1 | awk '{print "Disk: " $4 " free"}'
echo ""

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
PROJECT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
ZS="$PROJECT_DIR/bin/zerostart-linux-x86_64"
export PATH="$HOME/.local/bin:$HOME/.cargo/bin:$PATH"
export PYTHONPATH="$PROJECT_DIR/python:${PYTHONPATH:-}"

MODEL_ID="${SNAP_MODEL:-Qwen/Qwen2.5-7B}"
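# SNAP_MODEL overrides the benchmark model, e.g. a smaller checkpoint for a
# quick smoke test: export SNAP_MODEL=Qwen/Qwen2.5-0.5B (any Hub repo id works).
# All four scenarios read weights from the shared Hugging Face cache
# (~/.cache/huggingface by default); pre-fetching once keeps network download
# time out of the comparison, e.g.:
#   python3 -c "from huggingface_hub import snapshot_download; snapshot_download('Qwen/Qwen2.5-7B')"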

# ============================================================
# Scenario 1: pip install + from_pretrained (traditional)
# ============================================================
echo "--- Scenario 1: pip install + from_pretrained ---"
# Clean slate
rm -rf /tmp/.pip-bench-venv
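# NOTE: timestamps rely on GNU date's %N nanosecond field (%3N = milliseconds);
# BSD/macOS date prints a literal "3N" instead, which breaks the arithmetic below.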
BENCH_START=$(date +%s%3N)

cat > /tmp/bench_pip.py << PYEOF
import time
t_script = time.monotonic()

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

t_import = time.monotonic()

tokenizer = AutoTokenizer.from_pretrained("$MODEL_ID")
model = AutoModelForCausalLM.from_pretrained("$MODEL_ID", torch_dtype=torch.bfloat16, device_map="cpu")
model.eval()
t_model = time.monotonic()

inputs = tokenizer("The quick brown fox", return_tensors="pt")
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=10, do_sample=False)
result = tokenizer.decode(out[0], skip_special_tokens=True)
t_inf = time.monotonic()

print(f"RESULT: {result}")
print(f"TIME import={t_import-t_script:.2f}s model={t_model-t_import:.2f}s inference={t_inf-t_model:.2f}s total={t_inf-t_script:.2f}s")
PYEOF
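# Sanity check: every scenario runs the same prompt with greedy decoding
# (do_sample=False), so the RESULT lines should be identical across runs;
# a divergence points at an environment difference, not timing noise.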

# Install into a fresh venv (simulates cold container)
python3 -m venv /tmp/.pip-bench-venv
/tmp/.pip-bench-venv/bin/pip install -q torch transformers accelerate 2>&1 | tail -3
PIP_DONE=$(date +%s%3N)
echo " pip install: $(( PIP_DONE - BENCH_START ))ms"

/tmp/.pip-bench-venv/bin/python /tmp/bench_pip.py 2>&1 | grep -E "^(RESULT|TIME)"
BENCH_END=$(date +%s%3N)
echo " Total wall clock (install + load + inference): $(( BENCH_END - BENCH_START ))ms"
rm -rf /tmp/.pip-bench-venv
echo ""

# ============================================================
# Scenario 2: zerostart cold + from_pretrained
# ============================================================
echo "--- Scenario 2: zerostart cold + from_pretrained ---"
export ZEROSTART_CACHE="/tmp/.zs-e2e-bench"
export ZS_NO_SHARED_CACHE=1
rm -rf "$ZEROSTART_CACHE"
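# A private cache dir plus ZS_NO_SHARED_CACHE=1 is assumed to keep zerostart
# from reusing any machine-wide package cache, so this run is genuinely cold.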

cat > /tmp/bench_zs_cold.py << PYEOF
import time
t_script = time.monotonic()

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

t_import = time.monotonic()

tokenizer = AutoTokenizer.from_pretrained("$MODEL_ID")
model = AutoModelForCausalLM.from_pretrained("$MODEL_ID", torch_dtype=torch.bfloat16, device_map="cpu")
model.eval()
t_model = time.monotonic()

inputs = tokenizer("The quick brown fox", return_tensors="pt")
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=10, do_sample=False)
result = tokenizer.decode(out[0], skip_special_tokens=True)
t_inf = time.monotonic()

print(f"RESULT: {result}")
print(f"TIME import={t_import-t_script:.2f}s model={t_model-t_import:.2f}s inference={t_inf-t_model:.2f}s total={t_inf-t_script:.2f}s")
PYEOF
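# The payload above is byte-identical to bench_pip.py; only the launcher
# changes, so the delta vs Scenario 1 is pure environment-setup overhead.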

ZS_START=$(date +%s%3N)
$ZS run -p torch -p transformers -p accelerate /tmp/bench_zs_cold.py 2>&1 | grep -E "^(RESULT|TIME|Resolved|Daemon|Environment|Cache)"
ZS_END=$(date +%s%3N)
echo " Total wall clock (zerostart cold + load + inference): $(( ZS_END - ZS_START ))ms"
echo ""

# ============================================================
# Scenario 3: zerostart warm + from_pretrained
# ============================================================
echo "--- Scenario 3: zerostart warm + from_pretrained ---"
# Cache is now populated from Scenario 2

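# Same bench_zs_cold.py on purpose: with ZEROSTART_CACHE already populated,
# any improvement over Scenario 2 is attributable to zerostart's cache hit.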
ZS_WARM_START=$(date +%s%3N)
$ZS run -p torch -p transformers -p accelerate /tmp/bench_zs_cold.py 2>&1 | grep -E "^(RESULT|TIME|Cache)"
ZS_WARM_END=$(date +%s%3N)
echo " Total wall clock (zerostart warm + load + inference): $(( ZS_WARM_END - ZS_WARM_START ))ms"
echo ""

# ============================================================
# Scenario 4: zerostart warm + hydrate (snapshot)
# ============================================================
echo "--- Scenario 4: Create snapshot for hydrate ---"

cat > /tmp/bench_create_snap.py << PYEOF
import shutil
import time
t0 = time.monotonic()
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from zerostart.snapshot import snapshot

tokenizer = AutoTokenizer.from_pretrained("$MODEL_ID")
model = AutoModelForCausalLM.from_pretrained("$MODEL_ID", torch_dtype=torch.bfloat16, device_map="cpu")
model.eval()

shutil.rmtree("/tmp/e2e-snapshot", ignore_errors=True)
snapshot(state={"model": model, "tokenizer": tokenizer}, path="/tmp/e2e-snapshot")
t1 = time.monotonic()
print(f"Snapshot created in {t1-t0:.2f}s")
PYEOF
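# snapshot()/hydrate() come from this repo's zerostart.snapshot module; given
# the -p cloudpickle below, the snapshot is presumably a cloudpickle-based
# serialization of the live objects to /tmp/e2e-snapshot.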

$ZS run -p torch -p transformers -p accelerate -p cloudpickle /tmp/bench_create_snap.py 2>&1 | grep -E "^(Snapshot|Cache)"

echo ""
echo "--- Scenario 4: zerostart warm + hydrate + inference ---"

cat > /tmp/bench_hydrate.py << PYEOF
import time
t_script = time.monotonic()

import torch
from zerostart.snapshot import hydrate

t_import = time.monotonic()

restored = hydrate("/tmp/e2e-snapshot")
model = restored["model"]
model.eval()
tokenizer = restored["tokenizer"]
t_hydrate = time.monotonic()

inputs = tokenizer("The quick brown fox", return_tensors="pt")
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=10, do_sample=False)
result = tokenizer.decode(out[0], skip_special_tokens=True)
t_inf = time.monotonic()

print(f"RESULT: {result}")
print(f"TIME import={t_import-t_script:.2f}s hydrate={t_hydrate-t_import:.2f}s inference={t_inf-t_hydrate:.2f}s total={t_inf-t_script:.2f}s")
PYEOF
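# Note: bench_hydrate.py never imports transformers directly; deserialization
# presumably pulls it in inside hydrate(), so that cost shows up under
# hydrate= in the TIME line rather than under import=.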

ZS_HYD_START=$(date +%s%3N)
$ZS run -p torch -p transformers -p accelerate -p cloudpickle /tmp/bench_hydrate.py 2>&1 | grep -E "^(RESULT|TIME|Cache)"
ZS_HYD_END=$(date +%s%3N)
echo " Total wall clock (zerostart warm + hydrate + inference): $(( ZS_HYD_END - ZS_HYD_START ))ms"
echo ""

# ============================================================
# Summary
# ============================================================
echo "============================================================"
echo "MODEL: $MODEL_ID"
echo ""
echo "Boot-to-inference wall clock:"
echo " 1. pip install + from_pretrained:        $(( BENCH_END - BENCH_START ))ms"
echo " 2. zerostart cold + from_pretrained:     $(( ZS_END - ZS_START ))ms"
echo " 3. zerostart warm + from_pretrained:     $(( ZS_WARM_END - ZS_WARM_START ))ms"
echo " 4. zerostart warm + hydrate (snapshot):  $(( ZS_HYD_END - ZS_HYD_START ))ms"
echo "============================================================"
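
# Optional: rough integer speedup of the snapshot path over the pip baseline
# (bash integer division truncates); guarded so a failed scenario that
# recorded ~0ms cannot cause a divide-by-zero.
if (( ZS_HYD_END - ZS_HYD_START > 0 )); then
  echo "Snapshot path speedup vs pip install: $(( (BENCH_END - BENCH_START) / (ZS_HYD_END - ZS_HYD_START) ))x"
fi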