<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>quant.cpp — Understanding KV Cache Compression</title>
<meta name="description" content="An educational guide to KV cache compression in LLM inference. Learn how quant.cpp achieves 6.4x compression with only 3% quality loss.">
<!-- Open Graph / Social Preview -->
<meta property="og:title" content="quant.cpp — AI's Memory, Compressed 6.4x">
<meta property="og:description" content="An interactive guide to KV cache compression. 4 orthogonal techniques achieve 6.4x compression at 3% quality cost.">
<meta property="og:image" content="https://quantumaikr.github.io/quant.cpp/guide/og-image.png">
<meta property="og:url" content="https://quantumaikr.github.io/quant.cpp/guide/">
<meta property="og:type" content="website">
<meta name="twitter:card" content="summary_large_image">
<meta name="twitter:title" content="quant.cpp — AI's Memory, Compressed 6.4x">
<meta name="twitter:description" content="An interactive guide to KV cache compression. 6.4x compression, 59% faster attention, 3% quality cost.">
<meta name="twitter:image" content="https://quantumaikr.github.io/quant.cpp/guide/og-image.png">
<style>
/* ===== Reset & Base ===== */
*{margin:0;padding:0;box-sizing:border-box}
:root{
--bg:#0a0a0f;--bg2:#12121a;--bg3:#1a1a2e;
--text:#e0e0e8;--text2:#a0a0b0;--text3:#707088;
--accent:#6c5ce7;--accent2:#a29bfe;--accent3:#fd79a8;
--green:#00b894;--orange:#fdcb6e;--red:#ff6b6b;
--blue:#74b9ff;--cyan:#81ecec;
--radius:12px;--shadow:0 4px 30px rgba(0,0,0,.3);
}
html{scroll-behavior:smooth;font-size:16px}
body{font-family:'Inter',-apple-system,BlinkMacSystemFont,'Segoe UI',sans-serif;background:var(--bg);color:var(--text);line-height:1.7;overflow-x:hidden}
a{color:var(--accent2);text-decoration:none;transition:color .2s}
a:hover{color:var(--accent3)}
code{font-family:'JetBrains Mono','Fira Code',monospace;background:var(--bg3);padding:2px 6px;border-radius:4px;font-size:.9em}
pre{background:var(--bg2);border:1px solid rgba(255,255,255,.06);border-radius:var(--radius);padding:1.5em;overflow-x:auto;font-size:.85em;line-height:1.6}
pre code{background:none;padding:0}
img{max-width:100%;border-radius:var(--radius)}
h1,h2,h3,h4{font-weight:700;line-height:1.3}
/* ===== Layout ===== */
.container{max-width:900px;margin:0 auto;padding:0 1.5rem}
section{padding:5rem 0;position:relative}
.section-label{display:inline-block;font-size:.75rem;font-weight:600;letter-spacing:.15em;text-transform:uppercase;color:var(--accent2);background:rgba(108,92,231,.12);padding:4px 12px;border-radius:20px;margin-bottom:1rem}
section h2{font-size:2rem;margin-bottom:1rem}
section h3{font-size:1.4rem;margin:2rem 0 .8rem;color:var(--accent2)}
section p{color:var(--text2);margin-bottom:1rem}
/* ===== Animations ===== */
@keyframes fadeUp{from{opacity:0;transform:translateY(40px)}to{opacity:1;transform:translateY(0)}}
@keyframes fadeIn{from{opacity:0}to{opacity:1}}
@keyframes slideLeft{from{opacity:0;transform:translateX(-60px)}to{opacity:1;transform:translateX(0)}}
@keyframes slideRight{from{opacity:0;transform:translateX(60px)}to{opacity:1;transform:translateX(0)}}
@keyframes pulse{0%,100%{transform:scale(1)}50%{transform:scale(1.05)}}
@keyframes glow{0%,100%{box-shadow:0 0 20px rgba(108,92,231,.2)}50%{box-shadow:0 0 40px rgba(108,92,231,.4)}}
@keyframes float{0%,100%{transform:translateY(0)}50%{transform:translateY(-10px)}}
@keyframes typing{from{width:0}to{width:100%}}
@keyframes blink{50%{border-color:transparent}}
@keyframes countUp{from{opacity:0;transform:scale(.5)}to{opacity:1;transform:scale(1)}}
@keyframes barGrow{from{width:0}to{width:var(--w)}}
@keyframes flowRight{from{transform:translateX(-100%);opacity:0}to{transform:translateX(0);opacity:1}}
@keyframes compress{0%{transform:scaleX(1)}50%{transform:scaleX(.15)}100%{transform:scaleX(.15)}}
@keyframes shimmer{0%{background-position:-200% 0}100%{background-position:200% 0}}
.reveal{opacity:0;transform:translateY(40px);transition:opacity .8s ease,transform .8s ease}
.reveal.visible{opacity:1;transform:translateY(0)}
.reveal-left{opacity:0;transform:translateX(-60px);transition:opacity .8s ease,transform .8s ease}
.reveal-left.visible{opacity:1;transform:translateX(0)}
.reveal-right{opacity:0;transform:translateX(60px);transition:opacity .8s ease,transform .8s ease}
.reveal-right.visible{opacity:1;transform:translateX(0)}
.stagger>*{opacity:0;transform:translateY(30px);transition:opacity .6s ease,transform .6s ease}
.stagger.visible>*:nth-child(1){transition-delay:.1s;opacity:1;transform:translateY(0)}
.stagger.visible>*:nth-child(2){transition-delay:.2s;opacity:1;transform:translateY(0)}
.stagger.visible>*:nth-child(3){transition-delay:.3s;opacity:1;transform:translateY(0)}
.stagger.visible>*:nth-child(4){transition-delay:.4s;opacity:1;transform:translateY(0)}
.stagger.visible>*:nth-child(5){transition-delay:.5s;opacity:1;transform:translateY(0)}
.stagger.visible>*:nth-child(6){transition-delay:.6s;opacity:1;transform:translateY(0)}
/* ===== Navigation ===== */
nav{position:fixed;top:0;left:0;right:0;z-index:100;background:rgba(10,10,15,.85);backdrop-filter:blur(20px);border-bottom:1px solid rgba(255,255,255,.06);transition:transform .3s}
nav .inner{max-width:1100px;margin:0 auto;padding:.6rem 1.5rem;display:flex;align-items:center;justify-content:space-between}
nav .logo{font-weight:800;font-size:1.1rem;color:var(--text)}
nav .logo span{color:var(--accent2)}
nav .nav-right{display:flex;align-items:center;gap:1.5rem}
nav ul{list-style:none;display:flex;gap:1.5rem}
nav li a{font-size:.8rem;color:var(--text3);font-weight:500;transition:color .2s;letter-spacing:.02em}
nav li a:hover,nav li a.active{color:var(--accent2)}
@media(max-width:768px){nav ul{display:none}}
/* ===== Language Toggle ===== */
.lang-toggle{display:flex;align-items:center;gap:0;background:var(--bg3);border-radius:20px;overflow:hidden;border:1px solid rgba(255,255,255,.08)}
.lang-btn{padding:4px 12px;font-size:.75rem;font-weight:600;cursor:pointer;transition:all .3s;color:var(--text3);border:none;background:none}
.lang-btn.active{background:var(--accent);color:#fff;border-radius:20px}
/* ===== Language transition ===== */
[data-i18n],[data-i18n-html]{transition:opacity .25s ease}
.lang-fading [data-i18n],.lang-fading [data-i18n-html]{opacity:0}
/* ===== Hero ===== */
#hero{min-height:100vh;display:flex;align-items:center;justify-content:center;text-align:center;position:relative;overflow:hidden;padding-top:4rem}
#hero::before{content:'';position:absolute;top:-50%;left:-50%;width:200%;height:200%;background:radial-gradient(circle at 30% 40%,rgba(108,92,231,.08) 0%,transparent 50%),radial-gradient(circle at 70% 60%,rgba(253,121,168,.05) 0%,transparent 50%);animation:float 8s ease-in-out infinite}
#hero h1{font-size:clamp(2.5rem,6vw,4.5rem);font-weight:900;margin-bottom:1rem;animation:fadeUp .8s ease both}
#hero h1 .highlight{background:linear-gradient(135deg,var(--accent),var(--accent3));-webkit-background-clip:text;-webkit-text-fill-color:transparent;background-clip:text}
#hero .subtitle{font-size:clamp(1rem,2.5vw,1.4rem);color:var(--text2);max-width:600px;margin:0 auto 2rem;animation:fadeUp .8s ease .2s both}
#hero .hero-stats{display:flex;gap:2rem;justify-content:center;flex-wrap:wrap;animation:fadeUp .8s ease .4s both}
.stat-card{background:var(--bg2);border:1px solid rgba(255,255,255,.06);border-radius:var(--radius);padding:1.2rem 1.8rem;min-width:140px;transition:transform .3s,box-shadow .3s}
.stat-card:hover{transform:translateY(-4px);box-shadow:var(--shadow)}
.stat-card .num{font-size:2rem;font-weight:800;background:linear-gradient(135deg,var(--accent2),var(--cyan));-webkit-background-clip:text;-webkit-text-fill-color:transparent;background-clip:text}
.stat-card .label{font-size:.75rem;color:var(--text3);margin-top:.2rem}
.hero-scroll{position:absolute;bottom:2rem;left:50%;transform:translateX(-50%);animation:float 2s ease-in-out infinite}
.hero-scroll svg{width:24px;height:24px;stroke:var(--text3);stroke-width:2;fill:none}
/* ===== TOC ===== */
#toc{background:var(--bg2);border-top:1px solid rgba(255,255,255,.04);border-bottom:1px solid rgba(255,255,255,.04)}
#toc .grid{display:grid;grid-template-columns:repeat(auto-fit,minmax(250px,1fr));gap:1rem}
.toc-item{background:var(--bg3);border-radius:var(--radius);padding:1.2rem;display:flex;align-items:flex-start;gap:1rem;transition:transform .3s,background .3s;cursor:pointer;border:1px solid transparent}
.toc-item:hover{transform:translateY(-2px);background:rgba(108,92,231,.1);border-color:rgba(108,92,231,.2)}
.toc-num{font-size:1.5rem;font-weight:800;color:var(--accent);min-width:2rem}
.toc-text h4{font-size:.95rem;margin-bottom:.2rem}
.toc-text p{font-size:.8rem;color:var(--text3);line-height:1.4}
/* ===== Visualization Blocks ===== */
.viz{background:var(--bg2);border:1px solid rgba(255,255,255,.06);border-radius:var(--radius);padding:2rem;margin:1.5rem 0;overflow:hidden}
.viz-title{font-size:.8rem;font-weight:600;color:var(--text3);text-transform:uppercase;letter-spacing:.1em;margin-bottom:1rem}
/* Memory bar animation */
.mem-bar-container{margin:.8rem 0}
.mem-bar-label{display:flex;justify-content:space-between;font-size:.8rem;margin-bottom:.3rem}
.mem-bar-label span:first-child{color:var(--text2)}
.mem-bar-label span:last-child{font-weight:600}
.mem-bar{height:28px;border-radius:6px;background:var(--bg3);overflow:hidden;position:relative}
.mem-bar-fill{height:100%;border-radius:6px;transition:width 1.5s ease;width:0;display:flex;align-items:center;padding-left:8px;font-size:.7rem;font-weight:600;color:rgba(0,0,0,.7)}
.mem-bar-fill.bar-fp32{background:linear-gradient(90deg,var(--red),#e17055)}
.mem-bar-fill.bar-prog{background:linear-gradient(90deg,var(--accent),var(--accent2))}
.mem-bar-fill.bar-aggr{background:linear-gradient(90deg,var(--green),var(--cyan))}
.mem-bar-fill.bar-evict{background:linear-gradient(90deg,#00cec9,var(--cyan))}
.mem-bar-fill.animated{width:var(--w)}
/* Compression pipeline */
.pipeline{display:flex;align-items:center;gap:.5rem;flex-wrap:wrap;margin:1.5rem 0}
.pipe-stage{background:var(--bg3);border:1px solid rgba(255,255,255,.08);border-radius:8px;padding:.8rem 1rem;text-align:center;flex:1;min-width:120px;transition:all .5s ease;opacity:0;transform:translateX(-30px)}
.pipe-stage.visible{opacity:1;transform:translateX(0)}
.pipe-stage .stage-icon{font-size:1.5rem;margin-bottom:.3rem}
.pipe-stage .stage-name{font-size:.75rem;font-weight:600;color:var(--accent2)}
.pipe-stage .stage-detail{font-size:.7rem;color:var(--text3);margin-top:.2rem}
.pipe-arrow{color:var(--text3);font-size:1.2rem;opacity:0;transition:opacity .5s}
.pipe-arrow.visible{opacity:1}
.pipe-result{background:linear-gradient(135deg,rgba(108,92,231,.15),rgba(0,184,148,.15));border:1px solid rgba(108,92,231,.3);border-radius:8px;padding:.8rem 1rem;text-align:center;flex:1;min-width:120px;animation:glow 3s ease-in-out infinite}
.pipe-result .stage-name{color:var(--green);font-weight:700}
/* Layer entropy viz */
.entropy-bar{display:flex;align-items:center;gap:.5rem;margin:.4rem 0;font-size:.8rem}
.entropy-bar .layer-id{min-width:60px;color:var(--text3);font-family:monospace}
.entropy-bar .bar-track{flex:1;height:20px;background:var(--bg3);border-radius:4px;overflow:hidden}
.entropy-bar .bar-value{height:100%;border-radius:4px;transition:width 1.2s ease;width:0;display:flex;align-items:center;justify-content:flex-end;padding-right:6px;font-size:.65rem;font-weight:600;color:rgba(255,255,255,.8)}
.entropy-bar .ent-val{min-width:50px;text-align:right;font-family:monospace;color:var(--text2)}
.bar-hot{background:linear-gradient(90deg,#e17055,var(--red))}
.bar-warm{background:linear-gradient(90deg,var(--orange),#e17055)}
.bar-cool{background:linear-gradient(90deg,var(--blue),var(--accent))}
.bar-cold{background:linear-gradient(90deg,var(--green),var(--cyan))}
/* Comparison table */
table{width:100%;border-collapse:collapse;margin:1.5rem 0;font-size:.9rem}
th{background:var(--bg3);padding:.8rem 1rem;text-align:left;font-size:.8rem;color:var(--accent2);text-transform:uppercase;letter-spacing:.05em;border-bottom:2px solid rgba(108,92,231,.3)}
td{padding:.7rem 1rem;border-bottom:1px solid rgba(255,255,255,.04)}
tr:hover td{background:rgba(108,92,231,.04)}
.highlight-row td{background:rgba(0,184,148,.08);font-weight:600}
.dim{color:var(--text3)}
/* Info cards */
.card-grid{display:grid;grid-template-columns:repeat(auto-fit,minmax(220px,1fr));gap:1rem;margin:1.5rem 0}
.info-card{background:var(--bg2);border:1px solid rgba(255,255,255,.06);border-radius:var(--radius);padding:1.5rem;transition:transform .3s,border-color .3s}
.info-card:hover{transform:translateY(-3px);border-color:rgba(108,92,231,.3)}
.info-card .card-icon{font-size:2rem;margin-bottom:.8rem}
.info-card h4{font-size:1rem;margin-bottom:.4rem}
.info-card p{font-size:.85rem;color:var(--text3);line-height:1.5}
/* Paper reference */
.paper{background:var(--bg2);border-left:3px solid var(--accent);border-radius:0 var(--radius) var(--radius) 0;padding:1rem 1.5rem;margin:1rem 0;transition:border-color .3s}
.paper:hover{border-color:var(--accent3)}
.paper .paper-title{font-weight:600;font-size:.95rem}
.paper .paper-meta{font-size:.8rem;color:var(--text3);margin-top:.3rem}
.paper .paper-desc{font-size:.85rem;color:var(--text2);margin-top:.5rem}
/* KV cache animation */
.kv-anim{display:flex;gap:2px;align-items:flex-end;height:80px;margin:1rem 0}
.kv-block{width:8px;border-radius:2px 2px 0 0;transition:height 1s ease,background 1s ease}
.kv-fp32{background:var(--accent2)}
.kv-4bit{background:var(--green)}
.kv-evicted{background:var(--bg3);opacity:.3}
/* Attention heatmap */
.attn-grid{display:grid;grid-template-columns:repeat(16,1fr);gap:2px;margin:1rem 0}
.attn-cell{aspect-ratio:1;border-radius:2px;transition:background .5s ease}
/* CTA */
.cta{text-align:center;padding:4rem 0}
.cta-btn{display:inline-block;padding:.9rem 2rem;border-radius:8px;font-weight:700;font-size:1rem;transition:all .3s}
.cta-primary{background:linear-gradient(135deg,var(--accent),var(--accent3));color:#fff}
.cta-primary:hover{transform:translateY(-2px);box-shadow:0 8px 30px rgba(108,92,231,.4);color:#fff}
.cta-secondary{border:1px solid rgba(255,255,255,.15);color:var(--text2);margin-left:1rem}
.cta-secondary:hover{border-color:var(--accent2);color:var(--accent2)}
/* Glossary */
.glossary-item{margin:1.5rem 0;padding:1rem;background:var(--bg2);border-radius:var(--radius);border:1px solid rgba(255,255,255,.04)}
.glossary-item dt{font-weight:700;color:var(--accent2);font-size:1.05rem;margin-bottom:.3rem}
.glossary-item dd{color:var(--text2);font-size:.9rem}
/* Footer */
footer{text-align:center;padding:3rem 0;color:var(--text3);font-size:.8rem;border-top:1px solid rgba(255,255,255,.04)}
footer a{color:var(--text2)}
/* ===== Responsive ===== */
@media(max-width:600px){
section{padding:3rem 0}
.hero-stats{flex-direction:column;align-items:center}
.pipeline{flex-direction:column}
.pipe-arrow{transform:rotate(90deg)}
.card-grid{grid-template-columns:1fr}
#toc .grid{grid-template-columns:1fr}
}
</style>
<link rel="preconnect" href="https://fonts.googleapis.com">
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700;800;900&family=JetBrains+Mono:wght@400;600&display=swap" rel="stylesheet">
</head>
<body>
<!-- ===== Navigation ===== -->
<nav>
<div class="inner">
<div class="logo">quant<span>.cpp</span></div>
<div class="nav-right">
<ul>
<li><a href="#problem" data-i18n="nav.problem">The Problem</a></li>
<li><a href="#solution" data-i18n="nav.solution">Solution</a></li>
<li><a href="#techniques" data-i18n="nav.techniques">4 Techniques</a></li>
<li><a href="#benchmarks" data-i18n="nav.benchmarks">Benchmarks</a></li>
<li><a href="#papers" data-i18n="nav.papers">Papers</a></li>
<li><a href="#glossary" data-i18n="nav.glossary">Glossary</a></li>
</ul>
<div class="lang-toggle">
<button class="lang-btn active" data-lang="en">EN</button>
<button class="lang-btn" data-lang="ko">한국어</button>
</div>
</div>
</div>
</nav>
<!-- ===== Hero ===== -->
<section id="hero">
<div class="container">
<h1 data-i18n-html="hero.title">AI's Memory,<br><span class="highlight">Compressed 6.4x</span></h1>
<p class="subtitle" data-i18n="hero.subtitle">An educational guide to KV cache compression — the key to running long-context AI on your laptop.</p>
<div class="hero-stats">
<div class="stat-card"><div class="num">6.4x</div><div class="label" data-i18n="hero.stat.compression">KV Compression</div></div>
<div class="stat-card"><div class="num">3%</div><div class="label" data-i18n="hero.stat.quality">Quality Cost</div></div>
<div class="stat-card"><div class="num">59%</div><div class="label" data-i18n="hero.stat.attention">Attention Speedup</div></div>
<div class="stat-card"><div class="num">16K</div><div class="label" data-i18n="hero.stat.loc">Lines of C</div></div>
</div>
</div>
<div class="hero-scroll"><svg viewBox="0 0 24 24"><path d="M12 5v14M5 12l7 7 7-7"/></svg></div>
</section>
<!-- ===== Table of Contents ===== -->
<section id="toc">
<div class="container">
<div class="section-label" data-i18n="toc.label">Guide</div>
<h2 data-i18n="toc.title">What You'll Learn</h2>
<div class="grid stagger" style="margin-top:1.5rem">
<a href="#problem" class="toc-item"><div class="toc-num">1</div><div class="toc-text"><h4 data-i18n="toc.item1.title">The Memory Problem</h4><p data-i18n="toc.item1.desc">Why KV cache is the real bottleneck in AI inference</p></div></a>
<a href="#kv-cache" class="toc-item"><div class="toc-num">2</div><div class="toc-text"><h4 data-i18n="toc.item2.title">What is KV Cache?</h4><p data-i18n="toc.item2.desc">How transformers remember context, and why it costs so much</p></div></a>
<a href="#solution" class="toc-item"><div class="toc-num">3</div><div class="toc-text"><h4 data-i18n="toc.item3.title">The Insight</h4><p data-i18n="toc.item3.desc">Not all memories are equally important</p></div></a>
<a href="#techniques" class="toc-item"><div class="toc-num">4</div><div class="toc-text"><h4 data-i18n="toc.item4.title">4 Compression Techniques</h4><p data-i18n="toc.item4.desc">Progressive, K/V Asymmetry, H2O Eviction, PyramidKV</p></div></a>
<a href="#benchmarks" class="toc-item"><div class="toc-num">5</div><div class="toc-text"><h4 data-i18n="toc.item5.title">Benchmarks</h4><p data-i18n="toc.item5.desc">Measured results on Llama 3.2 1B and Qwen3.5</p></div></a>
<a href="#papers" class="toc-item"><div class="toc-num">6</div><div class="toc-text"><h4 data-i18n="toc.item6.title">Research Papers</h4><p data-i18n="toc.item6.desc">Academic foundations behind each technique</p></div></a>
</div>
</div>
</section>
<!-- ===== Chapter 1: The Problem ===== -->
<section id="problem">
<div class="container">
<div class="section-label" data-i18n="ch1.label">Chapter 1</div>
<h2 class="reveal" data-i18n="ch1.title">The Real Bottleneck in AI</h2>
<p class="reveal" data-i18n-html="ch1.intro">When you chat with an AI, it needs to remember everything you've said. This memory is called the <strong>KV cache</strong>. The shocking truth:</p>
<div class="viz reveal">
<div class="viz-title" data-i18n="ch1.viz.title">Memory Usage: Model vs KV Cache</div>
<div class="mem-bar-container">
<div class="mem-bar-label"><span data-i18n="ch1.viz.model">AI Model (Llama 3.2 3B)</span><span>4.0 GB</span></div>
<div class="mem-bar"><div class="mem-bar-fill bar-fp32" style="--w:50%">4.0 GB</div></div>
</div>
<div class="mem-bar-container">
<div class="mem-bar-label"><span data-i18n="ch1.viz.kv">KV Cache (32K context, FP16)</span><span style="color:var(--red)">8.0 GB</span></div>
<div class="mem-bar"><div class="mem-bar-fill bar-fp32" style="--w:100%" data-i18n="ch1.viz.kv.bar">8.0 GB — larger than the model!</div></div>
</div>
<div class="mem-bar-container" style="margin-top:1.5rem;padding-top:1rem;border-top:1px solid rgba(255,255,255,.06)">
<div class="mem-bar-label"><span data-i18n="ch1.viz.compressed">KV Cache with quant.cpp (6.4x)</span><span style="color:var(--green)">1.3 GB</span></div>
<div class="mem-bar"><div class="mem-bar-fill bar-aggr" style="--w:16%">1.3 GB</div></div>
</div>
</div>
<p class="reveal" data-i18n-html="ch1.explanation">The KV cache grows with every token in the conversation. At 32K context, it's <strong>2x larger than the model itself</strong>. This is why your laptop runs out of memory during long conversations — not because the model is too big, but because its <em>memory</em> is.</p>
</div>
</section>
<!-- ===== Chapter 2: What is KV Cache ===== -->
<section id="kv-cache" style="background:var(--bg2)">
<div class="container">
<div class="section-label" data-i18n="ch2.label">Chapter 2</div>
<h2 class="reveal" data-i18n="ch2.title">What is KV Cache?</h2>
<p class="reveal" data-i18n-html="ch2.intro">In a Transformer, every token "attends" to all previous tokens. To do this, each token creates a <strong>Key</strong> (what am I?) and a <strong>Value</strong> (what do I contain?). These are stored so future tokens can look back at them.</p>
<div class="viz reveal">
<div class="viz-title" data-i18n="ch2.viz.title">How Attention Works (Simplified)</div>
<div style="font-family:monospace;font-size:.85rem;line-height:2;color:var(--text2)">
<div data-i18n-html="ch2.viz.query">Current token creates: <strong style="color:var(--accent2)">Query</strong> = "What am I looking for?"</div>
<div data-i18n-html="ch2.viz.kv">Each past token stored: <strong style="color:var(--orange)">Key</strong> = "This is what I am" <strong style="color:var(--green)">Value</strong> = "This is my content"</div>
<div style="margin-top:.5rem" data-i18n-html="ch2.viz.formula"><strong style="color:var(--accent3)">Attention</strong> = softmax(<strong style="color:var(--accent2)">Q</strong> × <strong style="color:var(--orange)">K</strong><sup>T</sup>) × <strong style="color:var(--green)">V</strong></div>
<div style="margin-top:.5rem;color:var(--text3)" data-i18n="ch2.viz.summary">→ "Look at all past tokens, focus on the relevant ones, blend their values"</div>
</div>
</div>
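<p class="reveal">To make the bookkeeping concrete, here is a minimal single-head NumPy sketch of one decode step with a KV cache (illustrative Python only; quant.cpp itself is written in C):</p>
<pre class="reveal"><code># One single-head decode step with a KV cache (illustrative sketch).
import numpy as np

def decode_step(q, k_new, v_new, k_cache, v_cache):
    """Append this token's K/V to the cache, then attend over all past tokens."""
    k_cache.append(k_new)               # one Key vector per token, per layer
    v_cache.append(v_new)               # one Value vector per token, per layer
    K = np.stack(k_cache)               # (n_tokens, head_dim)
    V = np.stack(v_cache)               # (n_tokens, head_dim)
    scores = K @ q / np.sqrt(len(q))    # Q x K^T, scaled
    w = np.exp(scores - scores.max())
    w /= w.sum()                        # softmax -> attention distribution
    return w @ V                        # blend Values by relevance
</code></pre>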
<h3 class="reveal" data-i18n="ch2.expensive.title">Why It's Expensive</h3>
<p class="reveal" data-i18n-html="ch2.expensive.desc">For <strong>every layer</strong> and <strong>every token position</strong>, we store a Key vector and a Value vector. A typical model has 16-32 layers. At 32K context:</p>
<div class="viz reveal">
<div class="viz-title" data-i18n="ch2.growth.title">KV Cache Growth</div>
<div id="kv-growth" style="display:flex;align-items:flex-end;gap:3px;height:120px;padding-top:20px">
<!-- Filled by JS -->
</div>
<div style="display:flex;justify-content:space-between;font-size:.7rem;color:var(--text3);margin-top:.5rem">
<span data-i18n="ch2.growth.1k">1K tokens</span><span>8K</span><span>16K</span><span>32K</span><span>64K</span><span>128K</span>
</div>
</div>
<p class="reveal" data-i18n-html="ch2.growth.desc">Every doubling of context = doubling of KV cache. And the <strong>attention cost is O(n)</strong> per token — at 1000 tokens, attention is already 35% of total compute time.</p>
</div>
</section>
<!-- ===== Chapter 3: The Insight ===== -->
<section id="solution">
<div class="container">
<div class="section-label" data-i18n="ch3.label">Chapter 3</div>
<h2 class="reveal" data-i18n="ch3.title">The Key Insight</h2>
<p class="reveal" style="font-size:1.2rem;color:var(--text);font-weight:500;margin:1.5rem 0" data-i18n="ch3.quote">Not all memories are equally important. AI attention, like human attention, concentrates on what matters.</p>
<div class="card-grid stagger">
<div class="info-card">
<div class="card-icon">⏰</div>
<h4 data-i18n="ch3.card1.title">Recent tokens matter most</h4>
<p data-i18n="ch3.card1.desc">~70% of attention weight falls on the last 128 tokens. Old tokens rarely get looked at.</p>
</div>
<div class="info-card">
<div class="card-icon">🔑</div>
<h4 data-i18n="ch3.card2.title">Keys are more sensitive than Values</h4>
<p data-i18n="ch3.card2.desc">Key errors get amplified by softmax (nonlinear). Value errors propagate linearly — much more forgiving.</p>
</div>
<div class="info-card">
<div class="card-icon">⭐</div>
<h4 data-i18n="ch3.card3.title">A few tokens carry most information</h4>
<p data-i18n="ch3.card3.desc">Attention follows a power law: "heavy hitter" tokens get high attention across all queries.</p>
</div>
<div class="info-card">
<div class="card-icon">📊</div>
<h4 data-i18n="ch3.card4.title">Deep layers attend sharply</h4>
<p data-i18n="ch3.card4.desc">Layer 11 entropy = 1.84 bits (~4 tokens). Layer 1 entropy = 6.29 bits (~78 tokens). Deep layers need less KV.</p>
</div>
</div>
<p class="reveal" style="margin-top:2rem" data-i18n-html="ch3.summary">These four observations correspond to four <strong>orthogonal compression dimensions</strong>. Because they're independent, their effects multiply:</p>
</div>
</section>
<!-- ===== Chapter 4: Four Techniques ===== -->
<section id="techniques" style="background:var(--bg2)">
<div class="container">
<div class="section-label" data-i18n="ch4.label">Chapter 4</div>
<h2 class="reveal" data-i18n="ch4.title">Four Dimensions of Compression</h2>
<!-- Pipeline visualization -->
<div class="pipeline reveal" id="pipeline">
<div class="pipe-stage" style="transition-delay:.1s"><div class="stage-icon">⏰</div><div class="stage-name">Progressive</div><div class="stage-detail" data-i18n="ch4.pipe.time">Time dimension</div></div>
<div class="pipe-arrow">→</div>
<div class="pipe-stage" style="transition-delay:.3s"><div class="stage-icon">🔑</div><div class="stage-name">K/V Asymmetry</div><div class="stage-detail" data-i18n="ch4.pipe.tensor">Tensor dimension</div></div>
<div class="pipe-arrow">→</div>
<div class="pipe-stage" style="transition-delay:.5s"><div class="stage-icon">⭐</div><div class="stage-name">H2O Eviction</div><div class="stage-detail" data-i18n="ch4.pipe.token">Token dimension</div></div>
<div class="pipe-arrow">→</div>
<div class="pipe-stage" style="transition-delay:.7s"><div class="stage-icon">📊</div><div class="stage-name">PyramidKV</div><div class="stage-detail" data-i18n="ch4.pipe.layer">Layer dimension</div></div>
<div class="pipe-arrow">→</div>
<div class="pipe-result"><div class="stage-icon">🎯</div><div class="stage-name">6.4x + 59% faster</div><div class="stage-detail">+3% PPL</div></div>
</div>
<!-- Technique 1: Progressive -->
<h3 class="reveal" data-i18n-html="ch4.t1.title">1. Progressive Compression <span style="color:var(--text3);font-size:.9rem">(Time Dimension)</span></h3>
<p class="reveal" data-i18n="ch4.t1.desc">Keep the last 128 tokens' Keys at full precision (FP32). Compress everything else to 4-bit. The attention mechanism naturally focuses on recent tokens, so the compressed old tokens barely affect output quality.</p>
<div class="viz reveal">
<div class="viz-title" data-i18n="ch4.t1.viz.title">KV Cache Layout: Progressive k128</div>
<div id="kv-progressive" class="kv-anim"><!-- Filled by JS --></div>
<div style="display:flex;gap:1.5rem;font-size:.75rem;margin-top:.8rem">
<span data-i18n-html="ch4.t1.viz.fp32"><span style="display:inline-block;width:12px;height:12px;background:var(--accent2);border-radius:2px;vertical-align:middle"></span> FP32 (recent 128)</span>
<span data-i18n-html="ch4.t1.viz.4bit"><span style="display:inline-block;width:12px;height:12px;background:var(--green);border-radius:2px;vertical-align:middle"></span> 4-bit (older tokens)</span>
</div>
<div style="margin-top:1rem;font-size:.85rem;color:var(--text2)" data-i18n-html="ch4.t1.viz.result"><strong>Result:</strong> 2.9x compression at +1.3% PPL. Context-length invariant — works at 4K, 32K, or 128K.</div>
</div>
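<p class="reveal">A minimal sketch of the idea in Python, assuming a simple symmetric per-vector scale (quant.cpp's actual C kernels and block layout differ):</p>
<pre class="reveal"><code>import numpy as np

RECENT_WINDOW = 128   # last 128 tokens stay FP32

def quantize_q4(v):
    """Symmetric 4-bit quantization of one vector: 16 levels."""
    scale = np.abs(v).max() / 7.0 + 1e-12
    q = np.clip(np.round(v / scale), -8, 7).astype(np.int8)
    return q, scale   # store 4-bit codes plus one scale per vector

def compress_old_keys(k_cache):
    """k_cache: list of FP32 Key vectors, oldest first."""
    cutoff = max(0, len(k_cache) - RECENT_WINDOW)
    old = [quantize_q4(k) for k in k_cache[:cutoff]]   # older tokens: 4-bit
    recent = k_cache[cutoff:]                          # recent window: FP32
    return old, recent
</code></pre>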
<!-- Technique 2: K/V Asymmetry -->
<h3 class="reveal" data-i18n-html="ch4.t2.title">2. K/V Asymmetric Quantization <span style="color:var(--text3);font-size:.9rem">(Tensor Dimension)</span></h3>
<p class="reveal" data-i18n-html="ch4.t2.desc">Key errors pass through <code>softmax(Q × K<sup>T</sup>)</code> — a nonlinear function that amplifies small errors exponentially. Value errors are simply multiplied by attention weights — a linear operation with no amplification.</p>
<div class="viz reveal">
<div class="viz-title" data-i18n="ch4.t2.viz.title">Error Propagation: Key vs Value</div>
<div style="display:grid;grid-template-columns:1fr 1fr;gap:2rem;margin:1rem 0">
<div>
<div style="font-weight:600;color:var(--orange);margin-bottom:.5rem" data-i18n="ch4.t2.viz.keypath">Key Error Path</div>
<div style="font-family:monospace;font-size:.8rem;line-height:2;color:var(--text2)" data-i18n-html="ch4.t2.viz.keydetail">
K + error<br>
↓ Q × (K + error)<sup>T</sup><br>
↓ <strong style="color:var(--red)">softmax</strong> ← nonlinear amplification!<br>
↓ wrong attention distribution<br>
↓ <strong style="color:var(--red)">cascading output error</strong>
</div>
</div>
<div>
<div style="font-weight:600;color:var(--green);margin-bottom:.5rem" data-i18n="ch4.t2.viz.valuepath">Value Error Path</div>
<div style="font-family:monospace;font-size:.8rem;line-height:2;color:var(--text2)" data-i18n-html="ch4.t2.viz.valuedetail">
V + error<br>
↓ attention_weights × (V + error)<br>
↓ <strong style="color:var(--green)">linear sum</strong> ← no amplification<br>
↓ small output perturbation<br>
↓ <strong style="color:var(--green)">bounded, predictable error</strong>
</div>
</div>
</div>
<div style="margin-top:1rem;font-size:.85rem;color:var(--text2)" data-i18n-html="ch4.t2.viz.result"><strong>Result:</strong> K=4bit + V=4bit + k128 = <strong>6.4x compression at +3.0% PPL</strong>. Adding V=Q4 on top of k128 costs only +1.7pp.</div>
</div>
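<p class="reveal">One concrete way to exploit the asymmetry is the KIVI scheme cited in Chapter 6: quantize Keys per-channel (Key outliers cluster in fixed channels) and Values per-token. A minimal quantize-dequantize sketch, illustrative rather than quant.cpp's actual block format:</p>
<pre class="reveal"><code>import numpy as np

def quant_dequant(x, axis, bits=4):
    """Uniform symmetric quantization along one axis, returned dequantized."""
    qmax = 2 ** (bits - 1) - 1                              # 7 for 4-bit
    scale = np.abs(x).max(axis=axis, keepdims=True) / qmax + 1e-12
    return np.clip(np.round(x / scale), -qmax - 1, qmax) * scale

K = np.random.randn(256, 64)    # (tokens, channels)
V = np.random.randn(256, 64)

K_q = quant_dequant(K, axis=0)  # Keys: one scale per channel
V_q = quant_dequant(V, axis=1)  # Values: one scale per token
</code></pre>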
<!-- Technique 3: H2O -->
<h3 class="reveal" data-i18n-html="ch4.t3.title">3. H2O Token Eviction <span style="color:var(--text3);font-size:.9rem">(Token Dimension)</span></h3>
<p class="reveal" data-i18n="ch4.t3.desc">Not all tokens contribute equally to attention. The Heavy-Hitter Oracle (H2O) tracks cumulative attention weight per token and evicts the ones that consistently receive near-zero attention.</p>
<div class="viz reveal">
<div class="viz-title" data-i18n="ch4.t3.viz.title">Token Importance Distribution (Power Law)</div>
<div id="token-importance" style="display:flex;align-items:flex-end;gap:2px;height:100px"><!-- Filled by JS --></div>
<div style="display:flex;justify-content:space-between;font-size:.7rem;color:var(--text3);margin-top:.5rem">
<span data-i18n="ch4.t3.viz.sink">← Sink tokens (always kept)</span><span data-i18n="ch4.t3.viz.heavy">Heavy hitters</span><span data-i18n="ch4.t3.viz.evict">Low attention → evict</span>
</div>
<div style="margin-top:1rem;font-size:.85rem;color:var(--text2)" data-i18n-html="ch4.t3.viz.result"><strong>Result:</strong> Attention cost reduced by <strong>59%</strong> at budget=128. Output quality preserved — evicted tokens had near-zero attention anyway.</div>
</div>
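<p class="reveal">A sketch of the eviction policy described above and in the H2O paper (Chapter 6): always keep sink tokens and the recent window, then fill the rest of the budget with heavy hitters. The sink and window sizes here are illustrative placeholders:</p>
<pre class="reveal"><code>import numpy as np

def h2o_keep_set(cum_attn, n_sink=4, n_recent=32, budget=128):
    """cum_attn[i] = total attention token i has received across decode steps.
    Returns sorted indices of tokens to keep; everything else is evicted."""
    n = len(cum_attn)
    keep = set(range(min(n_sink, n)))               # sink tokens: always kept
    keep |= set(range(max(0, n - n_recent), n))     # recent window: always kept
    for i in np.argsort(cum_attn)[::-1]:            # heavy hitters first
        if len(keep) >= budget:
            break
        keep.add(int(i))
    return sorted(keep)
</code></pre>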
<!-- Technique 4: PyramidKV -->
<h3 class="reveal" data-i18n-html="ch4.t4.title">4. PyramidKV <span style="color:var(--text3);font-size:.9rem">(Layer Dimension)</span></h3>
<p class="reveal" data-i18n="ch4.t4.desc">Different layers have vastly different attention patterns. Early layers attend broadly (high entropy), deep layers attend sharply (low entropy). Allocating uniform KV budget wastes memory on layers that only look at 4 tokens.</p>
<div class="viz reveal">
<div class="viz-title" data-i18n="ch4.t4.viz.title">Attention Entropy by Layer (Llama 3.2 1B, measured)</div>
<div id="entropy-chart">
<!-- Filled by JS -->
</div>
<div style="margin-top:1rem;font-size:.85rem;color:var(--text2)" data-i18n-html="ch4.t4.viz.result">
<strong>Pyramid budget:</strong> Layer 0 gets 256 KV entries, Layer 15 gets 64. Deep layers with 1.84-bit entropy need only ~4 tokens — giving them 256 is pure waste.
</div>
</div>
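<p class="reveal">A budget-allocation sketch matching the numbers above (linear interpolation from 256 down to 64 is one simple choice; the PyramidKV paper and quant.cpp may allocate differently):</p>
<pre class="reveal"><code>def pyramid_budgets(n_layers=16, top_budget=256, bottom_budget=64):
    """Layer 0 (broad attention) gets the most KV slots, the last layer the fewest."""
    step = (top_budget - bottom_budget) / (n_layers - 1)
    return [round(top_budget - i * step) for i in range(n_layers)]

print(pyramid_budgets())   # [256, 243, 230, ..., 77, 64]
</code></pre>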
</div>
</section>
<!-- ===== Chapter 5: Benchmarks ===== -->
<section id="benchmarks">
<div class="container">
<div class="section-label" data-i18n="ch5.label">Chapter 5</div>
<h2 class="reveal" data-i18n="ch5.title">Benchmarks</h2>
<p class="reveal" data-i18n="ch5.intro">All measurements on Llama 3.2 1B Instruct (Q8_0 GGUF), Apple M1 Pro, 8 threads.</p>
<h3 class="reveal" data-i18n="ch5.quality.title">Compression vs Quality</h3>
<div class="reveal">
<table>
<thead><tr><th data-i18n="ch5.th.config">Configuration</th><th>PPL</th><th data-i18n="ch5.th.vsfp32">vs FP32</th><th data-i18n="ch5.th.compression">Compression</th><th>Attention</th></tr></thead>
<tbody>
<tr><td data-i18n="ch5.row.fp32">FP32 baseline</td><td>151.2</td><td class="dim">—</td><td>1.0x</td><td>100%</td></tr>
<tr><td>K=4b + V=FP16 + k128</td><td>153.2</td><td>+1.3%</td><td>2.9x</td><td>100%</td></tr>
<tr class="highlight-row"><td><strong>K=4b + V=Q4 + k128</strong></td><td><strong>155.7</strong></td><td><strong>+3.0%</strong></td><td><strong>6.4x</strong></td><td>100%</td></tr>
<tr class="highlight-row"><td><strong>+ PyramidKV (b=256)</strong></td><td><strong data-i18n="ch5.row.same">~same</strong></td><td><strong data-i18n="ch5.row.same2">~same</strong></td><td><strong>6.4x+</strong></td><td><strong>41%</strong></td></tr>
<tr><td>K=3b + V=Q4 + k128</td><td>166.0</td><td>+9.8%</td><td>7.1x</td><td>100%</td></tr>
<tr><td>K=4b + V=Q2 + k128</td><td style="color:var(--red)">306.1</td><td style="color:var(--red)">+102%</td><td>8.0x</td><td class="dim" data-i18n="ch5.row.failed">failed</td></tr>
</tbody>
</table>
</div>
<h3 class="reveal" data-i18n="ch5.vs.title">vs llama.cpp</h3>
<p class="reveal" data-i18n="ch5.vs.desc">Same 4-bit budget, 3.5x less quality degradation:</p>
<div class="viz reveal">
<div class="viz-title" data-i18n="ch5.vs.viz.title">PPL Degradation at 4-bit (lower is better)</div>
<div class="mem-bar-container">
<div class="mem-bar-label"><span>llama.cpp Q4_0 KV</span><span style="color:var(--red)">+10.6%</span></div>
<div class="mem-bar"><div class="mem-bar-fill bar-fp32" style="--w:100%"></div></div>
</div>
<div class="mem-bar-container">
<div class="mem-bar-label"><span>quant.cpp K=4b + V=Q4 + k128</span><span style="color:var(--green)">+3.0%</span></div>
<div class="mem-bar"><div class="mem-bar-fill bar-aggr" style="--w:28%"></div></div>
</div>
</div>
<h3 class="reveal" data-i18n="ch5.context.title">Context Length on 8GB Mac</h3>
<div class="reveal">
<table>
<thead><tr><th data-i18n="ch5.ctx.th.context">Context</th><th data-i18n="ch5.ctx.th.fp32">FP32 KV</th><th data-i18n="ch5.ctx.th.prog">Progressive (2.9x)</th><th data-i18n="ch5.ctx.th.aggr">Aggressive (6.4x)</th><th data-i18n="ch5.ctx.th.evict">+ Eviction</th></tr></thead>
<tbody>
<tr><td>4K</td><td>OK</td><td>OK</td><td>OK</td><td data-i18n="ch5.ctx.fastest">OK (fastest)</td></tr>
<tr><td>16K</td><td data-i18n="ch5.ctx.borderline">borderline</td><td>OK</td><td>OK</td><td>OK</td></tr>
<tr><td>32K</td><td style="color:var(--red)">OOM</td><td>5.5 GB</td><td><strong>2.5 GB</strong></td><td><strong>~1.5 GB</strong></td></tr>
<tr><td>64K</td><td style="color:var(--red)">OOM</td><td style="color:var(--red)">OOM</td><td>5.0 GB</td><td>~3 GB</td></tr>
<tr class="highlight-row"><td><strong>128K</strong></td><td style="color:var(--red)">OOM</td><td style="color:var(--red)">OOM</td><td>16GB Mac</td><td><strong>~5 GB</strong></td></tr>
</tbody>
</table>
</div>
</div>
</section>
<!-- ===== Chapter 5.5: Beyond RAG ===== -->
<section id="beyond-rag">
<div class="container">
<div class="section-label" data-i18n="rag.label">Movement</div>
<h2 class="reveal" data-i18n="rag.title">Beyond RAG</h2>
<blockquote class="reveal" style="border-left:3px solid var(--accent);padding:1rem 1.5rem;margin:1.5rem 0;background:rgba(108,92,231,.05);font-size:1.1rem;line-height:1.6;color:var(--text)" data-i18n-html="rag.quote">
<strong>Chunking RAG was a workaround for small context windows.</strong><br>
The workaround became dogma.<br>
Now context windows are big enough that we don't need the workaround.<br>
<em style="color:var(--accent2)">— Welcome to Beyond RAG.</em>
</blockquote>
<p class="reveal" data-i18n-html="rag.intro">Traditional RAG splits documents into 512-token chunks, embeds them in a vector database, and retrieves fragments. This was a reasonable engineering compromise when LLMs had 2K context windows. <strong>Now they have 128K. The compromise should have started disappearing.</strong></p>
<p class="reveal" data-i18n="rag.para2">It didn't. The infrastructure became dogma. Vector DBs became billion-dollar companies. "RAG pipeline" became something every AI engineer was expected to build, regardless of whether their use case actually needed one.</p>
<div class="viz reveal">
<div class="viz-title" data-i18n="rag.viz.title">Chunk-Level RAG vs Document-Level RAG</div>
<div style="display:grid;grid-template-columns:1fr 1fr;gap:2rem;margin:1rem 0">
<div>
<div style="font-weight:600;color:var(--orange);margin-bottom:.5rem" data-i18n="rag.chunk.title">Chunk-Level RAG</div>
<div style="font-family:monospace;font-size:.8rem;line-height:2;color:var(--text2)">
100K docs<br>
↓ chunk (512 tokens)<br>
↓ embed → vector DB<br>
↓ search → 5 chunks<br>
↓ LLM (4K context)<br>
<strong style="color:var(--red)" data-i18n="rag.chunk.result">✗ Cross-page info lost</strong>
</div>
</div>
<div>
<div style="font-weight:600;color:var(--green);margin-bottom:.5rem" data-i18n="rag.doc.title">Document-Level RAG</div>
<div style="font-family:monospace;font-size:.8rem;line-height:2;color:var(--text2)">
100K docs<br>
↓ doc-level index<br>
↓ search → 2-3 full docs<br>
↓ LLM (<strong style="color:var(--green)">64K-128K</strong> context)<br>
↓ KV compression makes it fit<br>
<strong style="color:var(--green)" data-i18n="rag.doc.result">✓ Full document understanding</strong>
</div>
</div>
</div>
</div>
<h3 class="reveal" data-i18n="rag.complementary.title">Complementary, Not Competitive</h3>
<p class="reveal" data-i18n-html="rag.complementary.desc">RAG decides <strong>which documents</strong> to look at. Long-context decides <strong>how deeply</strong> to understand them. Each does what it's best at.</p>
<div class="card-grid stagger">
<div class="info-card">
<div class="card-icon">⨁</div>
<h4 data-i18n="rag.card1.t">RAG's weakness → Long-Context solves</h4>
<p data-i18n="rag.card1.d">Chunk boundaries lose cross-page relationships. Multi-hop reasoning fails. Long-context keeps the full document — no information loss.</p>
</div>
<div class="info-card">
<div class="card-icon">⨁</div>
<h4 data-i18n="rag.card2.t">Long-Context's weakness → RAG solves</h4>
<p data-i18n="rag.card2.d">Can't fit 100K documents in context. Prefill is slow. RAG narrows the search to 2-3 relevant documents that DO fit.</p>
</div>
<div class="info-card">
<div class="card-icon">💾</div>
<h4 data-i18n="rag.card3.t">Read Once, Query Forever</h4>
<p data-i18n="rag.card3.d">Pre-process documents into .kv files (GPU, once). Load instantly on any laptop (0.5s). Query offline, unlimited, private.</p>
</div>
</div>
<div class="viz reveal">
<div class="viz-title" data-i18n="rag.pipeline.title">Pre-computed KV Library Pattern</div>
<div style="font-family:monospace;font-size:.8rem;line-height:2;color:var(--text2)">
<div style="color:var(--text3)"># Once (GPU or overnight batch)</div>
<div>m.ask(open("<span style="color:var(--accent2)">manual.txt</span>").read())</div>
<div>m.save_context("<span style="color:var(--green)">manual.kv</span>") <span style="color:var(--text3)"># 1.5 GB compressed</span></div>
<br>
<div style="color:var(--text3)"># Anytime (laptop, offline, instant)</div>
<div>m.load_context("<span style="color:var(--green)">manual.kv</span>") <span style="color:var(--text3)"># 0.5 seconds</span></div>
<div>m.ask("<span style="color:var(--accent2)">What's the expense process?</span>")</div>
</div>
</div>
</div>
</section>
<!-- ===== Verification Box ===== -->
<section id="verification">
<div class="container">
<div class="section-label" data-i18n="verify.label">Measured Result</div>
<h2 class="reveal" data-i18n="verify.title">7/7 vs 0/7 — Verified</h2>
<p class="reveal" data-i18n-html="verify.intro">We compared three approaches on a synthetic 5-section document with 7 questions (4 single-hop, 3 multi-hop). Tested with <strong>Llama 3.2 3B Q8_0</strong>:</p>
<div class="viz reveal">
<div class="viz-title" data-i18n="verify.viz.title">Fact Extraction Accuracy</div>
<div class="mem-bar-container">
<div class="mem-bar-label"><span data-i18n="verify.bar1.label">Chunk-RAG (wrong section retrieved)</span><span style="color:var(--red)" data-i18n="verify.bar1.val">0/7 — all hallucinated</span></div>
<div class="mem-bar"><div class="mem-bar-fill bar-fp32" style="--w:0%">0%</div></div>
</div>
<div class="mem-bar-container">
<div class="mem-bar-label"><span data-i18n="verify.bar2.label">Full Document (FP32 KV)</span><span style="color:var(--green)">7/7</span></div>
<div class="mem-bar"><div class="mem-bar-fill bar-aggr" style="--w:100%">100%</div></div>
</div>
<div class="mem-bar-container">
<div class="mem-bar-label"><span data-i18n-html="verify.bar3.label"><strong>Full Document (6.4x KV compression)</strong></span><span style="color:var(--green)"><strong>7/7</strong></span></div>
<div class="mem-bar"><div class="mem-bar-fill bar-aggr" style="--w:100%" data-i18n="verify.bar3.inner">100% — same as FP32</div></div>
</div>
</div>
<h3 class="reveal" data-i18n="verify.halluc.title">The Hallucination Problem</h3>
<p class="reveal" data-i18n-html="verify.halluc.desc">When chunk-RAG retrieved the wrong section, the model didn't say "I don't know" — it generated <strong>plausible-sounding lies</strong>:</p>
<div class="viz reveal">
<div style="font-family:monospace;font-size:.85rem;line-height:2;color:var(--text2)" data-i18n-html="verify.halluc.examples">
<div><span style="color:var(--accent2)">Q:</span> Who is the CTO?</div>
<div><span style="color:var(--red)">Chunk-RAG:</span> "John Smith"   <span style="color:var(--text3)">→ truth: Maria Santos</span></div>
<br>
<div><span style="color:var(--accent2)">Q:</span> What is the revenue?</div>
<div><span style="color:var(--red)">Chunk-RAG:</span> "$1,000,000"   <span style="color:var(--text3)">→ truth: 847 million</span></div>
<br>
<div><span style="color:var(--accent2)">Q:</span> What percent is R&D?</div>
<div><span style="color:var(--red)">Chunk-RAG:</span> "15% of net income"   <span style="color:var(--text3)">→ truth: 14% of revenue</span></div>
</div>
</div>
<p class="reveal" style="color:var(--text);font-weight:500;font-size:1.1rem" data-i18n-html="verify.halluc.summary">This is the fundamental danger of chunk-RAG: <strong>retrieval failure becomes silent hallucination</strong>. KV compression makes it possible to load the entire document into context, eliminating this failure mode on consumer hardware.</p>
<div class="card-grid stagger" style="margin-top:2rem">
<div class="info-card">
<div class="card-icon">✅</div>
<h4 data-i18n="verify.card1.t">KV Compression = Zero Quality Loss</h4>
<p data-i18n="verify.card1.d">FP32 7/7 = 6.4x compressed 7/7. The 6.4x memory savings cost nothing in fact extraction quality.</p>
</div>
<div class="info-card">
<div class="card-icon">🔗</div>
<h4 data-i18n="verify.card2.t">Multi-Hop Reasoning Works</h4>
<p data-i18n="verify.card2.d">"What risk affects the growth region?" requires linking Section 3 (Asia growth) with Section 5 (Asia currency risk). Full-doc: ✓. Chunk-RAG: impossible.</p>
</div>
<div class="info-card">
<div class="card-icon">💻</div>
<h4 data-i18n="verify.card3.t">Runs on 16GB Mac</h4>
<p data-i18n="verify.card3.d">Llama 3.2 3B Q8_0, no GPU. 6.4x KV compression makes this practical on consumer hardware.</p>
</div>
</div>
<div style="text-align:center;margin-top:3rem">
<a href="https://github.com/quantumaikr/quant.cpp/blob/main/docs/beyond-rag-manifesto.md" class="cta-btn cta-primary" style="font-size:.95rem" data-i18n-html="verify.cta">Read the Beyond RAG Manifesto →</a>
</div>
</div>
</section>
<!-- ===== Chapter 6: Papers ===== -->
<section id="papers" style="background:var(--bg2)">
<div class="container">
<div class="section-label" data-i18n="ch6.label">Chapter 6</div>
<h2 class="reveal" data-i18n="ch6.title">Research Foundations</h2>
<p class="reveal" data-i18n="ch6.intro">Each technique in quant.cpp is grounded in peer-reviewed research:</p>
<div class="stagger" style="margin-top:1.5rem">
<div class="paper">
<div class="paper-title">TurboQuant: Redefining AI Efficiency with Extreme Compression</div>
<div class="paper-meta">ICLR 2026 · Google Research · <a href="https://arxiv.org/abs/2504.19874">arXiv:2504.19874</a></div>
<div class="paper-desc" data-i18n-html="ch6.paper1.desc">Random Hadamard Transform (RHT) normalizes activation distributions before Lloyd-Max codebook quantization. Foundation of our <code>turbo_kv_*</code> types.</div>
</div>
<div class="paper">
<div class="paper-title">KIVI: A Tuning-Free Asymmetric 2bit Quantization for KV Cache</div>
<div class="paper-meta">ICML 2024 · <a href="https://arxiv.org/abs/2402.02750">arXiv:2402.02750</a></div>
<div class="paper-desc" data-i18n="ch6.paper2.desc">Key insight: per-channel quantization for Keys, per-token for Values. K and V have fundamentally different error sensitivity due to softmax nonlinearity.</div>
</div>
<div class="paper">
<div class="paper-title">H2O: Heavy-Hitter Oracle for Efficient Generative Inference</div>
<div class="paper-meta">NeurIPS 2023 · <a href="https://arxiv.org/abs/2306.14048">arXiv:2306.14048</a></div>
<div class="paper-desc" data-i18n="ch6.paper3.desc">Attention follows a power law. Keep "sink" tokens + "heavy hitters" (high cumulative attention) + recent window. Evict the rest for O(1) KV budget.</div>
</div>
<div class="paper">
<div class="paper-title">PyramidKV: Dynamic KV Cache Compression based on Pyramidal Information Funneling</div>
<div class="paper-meta">Dec 2024 · <a href="https://arxiv.org/abs/2406.02069">arXiv:2406.02069</a></div>
<div class="paper-desc" data-i18n="ch6.paper4.desc">Attention entropy decreases with layer depth. Allocate larger KV budgets to early (high-entropy) layers, smaller to deep (low-entropy) layers.</div>
</div>
<div class="paper">
<div class="paper-title">PolarQuant & QJL</div>
<div class="paper-meta"><a href="https://arxiv.org/abs/2502.02617">arXiv:2502.02617</a> · <a href="https://arxiv.org/abs/2406.03482">arXiv:2406.03482</a></div>
<div class="paper-desc" data-i18n="ch6.paper5.desc">Polar decomposition for vector quantization; Johnson-Lindenstrauss random projection for 1-bit sign hashing. Both used in our hybrid turbo types.</div>
</div>
</div>
</div>
</section>
<!-- ===== Glossary ===== -->
<section id="glossary">
<div class="container">
<div class="section-label" data-i18n="glossary.label">Reference</div>
<h2 class="reveal" data-i18n="glossary.title">Glossary</h2>
<dl class="stagger">
<div class="glossary-item"><dt data-i18n="glossary.kv.term">KV Cache</dt><dd data-i18n="glossary.kv.def">Key-Value cache. Stores the Key and Value vectors for all past tokens, so they don't need to be recomputed. Grows linearly with sequence length.</dd></div>
<div class="glossary-item"><dt data-i18n="glossary.attn.term">Attention</dt><dd data-i18n-html="glossary.attn.def">The mechanism by which each token decides how much to "look at" each past token. Computed as softmax(Q × K<sup>T</sup>) × V. Cost is O(n) per token where n is sequence length.</dd></div>
<div class="glossary-item"><dt data-i18n="glossary.ppl.term">Perplexity (PPL)</dt><dd data-i18n="glossary.ppl.def">Measures how well the model predicts the next token. Lower is better. PPL=100 means the model is "100-ways confused" on average. A +3% increase means barely noticeable quality change.</dd></div>
<div class="glossary-item"><dt data-i18n="glossary.softmax.term">Softmax</dt><dd data-i18n="glossary.softmax.def">Converts raw scores into a probability distribution. Small changes in input can cause large changes in output (nonlinear amplification), which is why Key quantization errors are more damaging than Value errors.</dd></div>
<div class="glossary-item"><dt data-i18n="glossary.quant.term">Quantization</dt><dd data-i18n="glossary.quant.def">Reducing the number of bits per value. FP32 (32-bit) → FP16 (16-bit) → 4-bit → 2-bit. Each halving saves 50% memory but introduces approximation error.</dd></div>
<div class="glossary-item"><dt data-i18n="glossary.rht.term">RHT (Random Hadamard Transform)</dt><dd data-i18n="glossary.rht.def">A mathematical rotation that spreads out the distribution of values, making them more uniform and easier to quantize without large errors. Used in TurboQuant.</dd></div>
<div class="glossary-item"><dt data-i18n="glossary.prog.term">Progressive Compression</dt><dd data-i18n="glossary.prog.def">Keep recent tokens at full precision, compress older tokens aggressively. Inspired by how human memory works: recent events are vivid, old memories are fuzzy but sufficient.</dd></div>
<div class="glossary-item"><dt data-i18n="glossary.hh.term">Heavy Hitter</dt><dd data-i18n="glossary.hh.def">A token that consistently receives high attention weight from many queries. These tokens are informationally critical and should never be evicted.</dd></div>
<div class="glossary-item"><dt data-i18n="glossary.entropy.term">Attention Entropy</dt><dd data-i18n="glossary.entropy.def">Measures how spread out the attention distribution is. Low entropy = sharp focus on few tokens. High entropy = diffuse attention across many tokens. Measured in bits.</dd></div>
<div class="glossary-item"><dt data-i18n="glossary.gguf.term">GGUF</dt><dd data-i18n="glossary.gguf.def">The standard file format for quantized LLM model weights, created by the llama.cpp project. quant.cpp loads GGUF models directly.</dd></div>
</dl>
</div>
</section>
<!-- ===== CTA ===== -->
<section class="cta" style="background:var(--bg2)">
<div class="container reveal">
<h2 style="margin-bottom:1rem" data-i18n="cta.title">Try It Yourself</h2>
<p style="color:var(--text2);margin-bottom:2rem;max-width:560px;margin-left:auto;margin-right:auto" data-i18n="cta.desc">Ollama-style CLI. No GPU, no API key, no setup.</p>
<div style="display:flex;gap:1.5rem;flex-wrap:wrap;justify-content:center;margin-bottom:2rem;text-align:left">
<div>
<div style="font-size:.75rem;color:var(--text2);margin-bottom:.3rem;font-weight:600" data-i18n="cta.label.cli">CLI (v0.12.0+)</div>
<pre style="margin:0"><code>pip install quantcpp
quantcpp pull llama3.2:1b
quantcpp run llama3.2:1b
quantcpp serve llama3.2:1b -p 8080
quantcpp list</code></pre>
</div>
<div>
<div style="font-size:.75rem;color:var(--text2);margin-bottom:.3rem;font-weight:600" data-i18n="cta.label.python">Python API</div>
<pre style="margin:0"><code>from quantcpp import Model
m = Model.from_pretrained("Llama-3.2-1B")
print(m.ask("What is gravity?"))</code></pre>
</div>
</div>
<br>
<a href="https://github.com/quantumaikr/quant.cpp" class="cta-btn cta-primary">GitHub</a>
<a href="https://pypi.org/project/quantcpp/" class="cta-btn cta-secondary">PyPI</a>
<a href="https://quantumaikr.github.io/quant.cpp/" class="cta-btn cta-secondary">WASM Demo</a>
</div>
</section>
<!-- ===== Footer ===== -->
<footer>
<div class="container">
<p data-i18n-html="footer.text">quant.cpp · Apache 2.0 · <a href="https://github.com/quantumaikr/quant.cpp">GitHub</a> · Made by <a href="https://github.com/quantumaikr">quantumaikr</a></p>
</div>
</footer>
<!-- ===== JavaScript ===== -->
<script>
// === i18n translations ===
var i18n = {
en: {
"nav.problem": "The Problem",
"nav.solution": "Solution",
"nav.techniques": "4 Techniques",
"nav.benchmarks": "Benchmarks",
"nav.papers": "Papers",
"nav.glossary": "Glossary",
"hero.title": "AI's Memory,<br><span class=\"highlight\">Compressed 6.4x</span>",
"hero.subtitle": "An educational guide to KV cache compression \u2014 the key to running long-context AI on your laptop.",
"hero.stat.compression": "KV Compression",
"hero.stat.quality": "Quality Cost",
"hero.stat.attention": "Attention Speedup",
"hero.stat.loc": "Lines of C",
"toc.label": "Guide",
"toc.title": "What You'll Learn",
"toc.item1.title": "The Memory Problem",
"toc.item1.desc": "Why KV cache is the real bottleneck in AI inference",
"toc.item2.title": "What is KV Cache?",
"toc.item2.desc": "How transformers remember context, and why it costs so much",
"toc.item3.title": "The Insight",
"toc.item3.desc": "Not all memories are equally important",
"toc.item4.title": "4 Compression Techniques",
"toc.item4.desc": "Progressive, K/V Asymmetry, H2O Eviction, PyramidKV",
"toc.item5.title": "Benchmarks",
"toc.item5.desc": "Measured results on Llama 3.2 1B and Qwen3.5",
"toc.item6.title": "Research Papers",
"toc.item6.desc": "Academic foundations behind each technique",
"ch1.label": "Chapter 1",
"ch1.title": "The Real Bottleneck in AI",
"ch1.intro": "When you chat with an AI, it needs to remember everything you've said. This memory is called the <strong>KV cache</strong>. The shocking truth:",
"ch1.viz.title": "Memory Usage: Model vs KV Cache",
"ch1.viz.model": "AI Model (Llama 3.2 3B)",
"ch1.viz.kv": "KV Cache (32K context, FP16)",
"ch1.viz.kv.bar": "8.0 GB \u2014 larger than the model!",
"ch1.viz.compressed": "KV Cache with quant.cpp (6.4x)",
"ch1.explanation": "The KV cache grows with every token in the conversation. At 32K context, it's <strong>2x larger than the model itself</strong>. This is why your laptop runs out of memory during long conversations \u2014 not because the model is too big, but because its <em>memory</em> is.",
"ch2.label": "Chapter 2",
"ch2.title": "What is KV Cache?",
"ch2.intro": "In a Transformer, every token \"attends\" to all previous tokens. To do this, each token creates a <strong>Key</strong> (what am I?) and a <strong>Value</strong> (what do I contain?). These are stored so future tokens can look back at them.",
"ch2.viz.title": "How Attention Works (Simplified)",
"ch2.viz.query": "Current token creates: <strong style=\"color:var(--accent2)\">Query</strong> = \"What am I looking for?\"",
"ch2.viz.kv": "Each past token stored: <strong style=\"color:var(--orange)\">Key</strong> = \"This is what I am\" <strong style=\"color:var(--green)\">Value</strong> = \"This is my content\"",
"ch2.viz.formula": "<strong style=\"color:var(--accent3)\">Attention</strong> = softmax(<strong style=\"color:var(--accent2)\">Q</strong> × <strong style=\"color:var(--orange)\">K</strong><sup>T</sup>) × <strong style=\"color:var(--green)\">V</strong>",
"ch2.viz.summary": "\u2192 \"Look at all past tokens, focus on the relevant ones, blend their values\"",
"ch2.expensive.title": "Why It's Expensive",
"ch2.expensive.desc": "For <strong>every layer</strong> and <strong>every token position</strong>, we store a Key vector and a Value vector. A typical model has 16-32 layers. At 32K context:",
"ch2.growth.title": "KV Cache Growth",
"ch2.growth.1k": "1K tokens",
"ch2.growth.desc": "Every doubling of context = doubling of KV cache. And the <strong>attention cost is O(n)</strong> per token \u2014 at 1000 tokens, attention is already 35% of total compute time.",
"ch3.label": "Chapter 3",
"ch3.title": "The Key Insight",
"ch3.quote": "Not all memories are equally important. AI attention, like human attention, concentrates on what matters.",
"ch3.card1.title": "Recent tokens matter most",
"ch3.card1.desc": "~70% of attention weight falls on the last 128 tokens. Old tokens rarely get looked at.",
"ch3.card2.title": "Keys are more sensitive than Values",
"ch3.card2.desc": "Key errors get amplified by softmax (nonlinear). Value errors propagate linearly \u2014 much more forgiving.",
"ch3.card3.title": "A few tokens carry most information",
"ch3.card3.desc": "Attention follows a power law: \"heavy hitter\" tokens get high attention across all queries.",
"ch3.card4.title": "Deep layers attend sharply",
"ch3.card4.desc": "Layer 11 entropy = 1.84 bits (~4 tokens). Layer 1 entropy = 6.29 bits (~78 tokens). Deep layers need less KV.",
"ch3.summary": "These four observations correspond to four <strong>orthogonal compression dimensions</strong>. Because they're independent, their effects multiply:",
"ch4.label": "Chapter 4",
"ch4.title": "Four Dimensions of Compression",
"ch4.pipe.time": "Time dimension",
"ch4.pipe.tensor": "Tensor dimension",
"ch4.pipe.token": "Token dimension",
"ch4.pipe.layer": "Layer dimension",
"ch4.t1.title": "1. Progressive Compression <span style=\"color:var(--text3);font-size:.9rem\">(Time Dimension)</span>",
"ch4.t1.desc": "Keep the last 128 tokens' Keys at full precision (FP32). Compress everything else to 4-bit. The attention mechanism naturally focuses on recent tokens, so the compressed old tokens barely affect output quality.",
"ch4.t1.viz.title": "KV Cache Layout: Progressive k128",
"ch4.t1.viz.fp32": "<span style=\"display:inline-block;width:12px;height:12px;background:var(--accent2);border-radius:2px;vertical-align:middle\"></span> FP32 (recent 128)",
"ch4.t1.viz.4bit": "<span style=\"display:inline-block;width:12px;height:12px;background:var(--green);border-radius:2px;vertical-align:middle\"></span> 4-bit (older tokens)",
"ch4.t1.viz.result": "<strong>Result:</strong> 2.9x compression at +1.3% PPL. Context-length invariant \u2014 works at 4K, 32K, or 128K.",
"ch4.t2.title": "2. K/V Asymmetric Quantization <span style=\"color:var(--text3);font-size:.9rem\">(Tensor Dimension)</span>",
"ch4.t2.desc": "Key errors pass through <code>softmax(Q \u00d7 K<sup>T</sup>)</code> \u2014 a nonlinear function that amplifies small errors exponentially. Value errors are simply multiplied by attention weights \u2014 a linear operation with no amplification.",
"ch4.t2.viz.title": "Error Propagation: Key vs Value",
"ch4.t2.viz.keypath": "Key Error Path",
"ch4.t2.viz.keydetail": "K + error<br>↓ Q \u00d7 (K + error)<sup>T</sup><br>↓ <strong style=\"color:var(--red)\">softmax</strong> ← nonlinear amplification!<br>↓ wrong attention distribution<br>↓ <strong style=\"color:var(--red)\">cascading output error</strong>",
"ch4.t2.viz.valuepath": "Value Error Path",
"ch4.t2.viz.valuedetail": "V + error<br>↓ attention_weights \u00d7 (V + error)<br>↓ <strong style=\"color:var(--green)\">linear sum</strong> ← no amplification<br>↓ small output perturbation<br>↓ <strong style=\"color:var(--green)\">bounded, predictable error</strong>",
"ch4.t2.viz.result": "<strong>Result:</strong> K=4bit + V=4bit + k128 = <strong>6.4x compression at +3.0% PPL</strong>. Adding V=Q4 on top of k128 costs only +1.7pp.",
"ch4.t3.title": "3. H2O Token Eviction <span style=\"color:var(--text3);font-size:.9rem\">(Token Dimension)</span>",
"ch4.t3.desc": "Not all tokens contribute equally to attention. The Heavy-Hitter Oracle (H2O) tracks cumulative attention weight per token and evicts the ones that consistently receive near-zero attention.",
"ch4.t3.viz.title": "Token Importance Distribution (Power Law)",
"ch4.t3.viz.sink": "\u2190 Sink tokens (always kept)",
"ch4.t3.viz.heavy": "Heavy hitters",
"ch4.t3.viz.evict": "Low attention \u2192 evict",
"ch4.t3.viz.result": "<strong>Result:</strong> Attention cost reduced by <strong>59%</strong> at budget=128. Output quality preserved \u2014 evicted tokens had near-zero attention anyway.",
"ch4.t4.title": "4. PyramidKV <span style=\"color:var(--text3);font-size:.9rem\">(Layer Dimension)</span>",
"ch4.t4.desc": "Different layers have vastly different attention patterns. Early layers attend broadly (high entropy), deep layers attend sharply (low entropy). Allocating uniform KV budget wastes memory on layers that only look at 4 tokens.",
"ch4.t4.viz.title": "Attention Entropy by Layer (Llama 3.2 1B, measured)",
"ch4.t4.viz.result": "<strong>Pyramid budget:</strong> Layer 0 gets 256 KV entries, Layer 15 gets 64. Deep layers with 1.84-bit entropy need only ~4 tokens \u2014 giving them 256 is pure waste.",
"ch5.label": "Chapter 5",
"ch5.title": "Benchmarks",
"ch5.intro": "All measurements on Llama 3.2 1B Instruct (Q8_0 GGUF), Apple M1 Pro, 8 threads.",
"ch5.quality.title": "Compression vs Quality",
"ch5.th.config": "Configuration",
"ch5.th.vsfp32": "vs FP32",
"ch5.th.compression": "Compression",
"ch5.row.fp32": "FP32 baseline",
"ch5.row.same": "~same",
"ch5.row.same2": "~same",
"ch5.row.failed": "failed",
"ch5.vs.title": "vs llama.cpp",
"ch5.vs.desc": "Same 4-bit budget, 3.5x less quality degradation:",
"ch5.vs.viz.title": "PPL Degradation at 4-bit (lower is better)",
"ch5.context.title": "Context Length on 8GB Mac",
"ch5.ctx.th.context": "Context",
"ch5.ctx.th.fp32": "FP32 KV",
"ch5.ctx.th.prog": "Progressive (2.9x)",
"ch5.ctx.th.aggr": "Aggressive (6.4x)",
"ch5.ctx.th.evict": "+ Eviction",
"ch5.ctx.fastest": "OK (fastest)",
"ch5.ctx.borderline": "borderline",
"ch6.label": "Chapter 6",
"ch6.title": "Research Foundations",
"ch6.intro": "Each technique in quant.cpp is grounded in peer-reviewed research:",
"ch6.paper1.desc": "Random Hadamard Transform (RHT) normalizes activation distributions before Lloyd-Max codebook quantization. Foundation of our <code>turbo_kv_*</code> types.",
"ch6.paper2.desc": "Key insight: per-channel quantization for Keys, per-token for Values. K and V have fundamentally different error sensitivity due to softmax nonlinearity.",
"ch6.paper3.desc": "Attention follows a power law. Keep \"sink\" tokens + \"heavy hitters\" (high cumulative attention) + recent window. Evict the rest for O(1) KV budget.",
"ch6.paper4.desc": "Attention entropy decreases with layer depth. Allocate larger KV budgets to early (high-entropy) layers, smaller to deep (low-entropy) layers.",
"ch6.paper5.desc": "Polar decomposition for vector quantization; Johnson-Lindenstrauss random projection for 1-bit sign hashing. Both used in our hybrid turbo types.",
"glossary.label": "Reference",
"glossary.title": "Glossary",
"glossary.kv.term": "KV Cache",
"glossary.kv.def": "Key-Value cache. Stores the Key and Value vectors for all past tokens, so they don't need to be recomputed. Grows linearly with sequence length.",
"glossary.attn.term": "Attention",
"glossary.attn.def": "The mechanism by which each token decides how much to \"look at\" each past token. Computed as softmax(Q \u00d7 K<sup>T</sup>) \u00d7 V. Cost is O(n) per token where n is sequence length.",
"glossary.ppl.term": "Perplexity (PPL)",
"glossary.ppl.def": "Measures how well the model predicts the next token. Lower is better. PPL=100 means the model is \"100-ways confused\" on average. A +3% increase means barely noticeable quality change.",
"glossary.softmax.term": "Softmax",
"glossary.softmax.def": "Converts raw scores into a probability distribution. Small changes in input can cause large changes in output (nonlinear amplification), which is why Key quantization errors are more damaging than Value errors.",
"glossary.quant.term": "Quantization",
"glossary.quant.def": "Reducing the number of bits per value. FP32 (32-bit) \u2192 FP16 (16-bit) \u2192 4-bit \u2192 2-bit. Each halving saves 50% memory but introduces approximation error.",
"glossary.rht.term": "RHT (Random Hadamard Transform)",
"glossary.rht.def": "A mathematical rotation that spreads out the distribution of values, making them more uniform and easier to quantize without large errors. Used in TurboQuant.",
"glossary.prog.term": "Progressive Compression",
"glossary.prog.def": "Keep recent tokens at full precision, compress older tokens aggressively. Inspired by how human memory works: recent events are vivid, old memories are fuzzy but sufficient.",
"glossary.hh.term": "Heavy Hitter",
"glossary.hh.def": "A token that consistently receives high attention weight from many queries. These tokens are informationally critical and should never be evicted.",
"glossary.entropy.term": "Attention Entropy",
"glossary.entropy.def": "Measures how spread out the attention distribution is. Low entropy = sharp focus on few tokens. High entropy = diffuse attention across many tokens. Measured in bits.",
"glossary.gguf.term": "GGUF",
"glossary.gguf.def": "The standard file format for quantized LLM model weights, created by the llama.cpp project. quant.cpp loads GGUF models directly.",
"cta.title": "Try It Yourself",
"cta.desc": "Ollama-style CLI. No GPU, no API key, no setup.",
"cta.label.cli": "CLI (v0.12.0+)",
"cta.label.python": "Python API",
"rag.label": "Movement",
"rag.title": "Beyond RAG",
"rag.intro": "Traditional RAG splits documents into 512-token chunks, embeds them in a vector database, and retrieves fragments. This was a reasonable engineering compromise when LLMs had 2K context windows. <strong>Now they have 128K. The compromise should have started disappearing.</strong>",
"rag.viz.title": "Chunk-Level RAG vs Document-Level RAG",
"rag.chunk.title": "Chunk-Level RAG",
"rag.chunk.result": "✗ Cross-page info lost",
"rag.doc.title": "Document-Level RAG",
"rag.doc.result": "✓ Full document understanding",
"rag.complementary.title": "Complementary, Not Competitive",
"rag.complementary.desc": "RAG decides <strong>which documents</strong> to look at. Long-context decides <strong>how deeply</strong> to understand them. Each does what it's best at.",
"rag.card1.t": "RAG's weakness → Long-Context solves",
"rag.card1.d": "Chunk boundaries lose cross-page relationships. Multi-hop reasoning fails. Long-context keeps the full document — no information loss.",
"rag.card2.t": "Long-Context's weakness → RAG solves",
"rag.card2.d": "Can't fit 100K documents in context. Prefill is slow. RAG narrows the search to 2-3 relevant documents that DO fit.",
"rag.card3.t": "Read Once, Query Forever",
"rag.card3.d": "Pre-process documents into .kv files (GPU, once). Load instantly on any laptop (0.5s). Query offline, unlimited, private.",
"rag.pipeline.title": "Pre-computed KV Library Pattern",
"rag.quote": "<strong>Chunking RAG was a workaround for small context windows.</strong><br>The workaround became dogma.<br>Now context windows are big enough that we don't need the workaround.<br><em style=\"color:var(--accent2)\">— Welcome to Beyond RAG.</em>",
"rag.para2": "It didn't. The infrastructure became dogma. Vector DBs became billion-dollar companies. \"RAG pipeline\" became something every AI engineer was expected to build, regardless of whether their use case actually needed one.",
"verify.label": "Measured Result",
"verify.title": "7/7 vs 0/7 — Verified",
"verify.intro": "We compared three approaches on a synthetic 5-section document with 7 questions (4 single-hop, 3 multi-hop). Tested with <strong>Llama 3.2 3B Q8_0</strong>:",
"verify.viz.title": "Fact Extraction Accuracy",
"verify.bar1.label": "Chunk-RAG (wrong section retrieved)",
"verify.bar1.val": "0/7 — all hallucinated",
"verify.bar2.label": "Full Document (FP32 KV)",
"verify.bar3.label": "<strong>Full Document (6.4x KV compression)</strong>",
"verify.bar3.inner": "100% — same as FP32",
"verify.halluc.title": "The Hallucination Problem",
"verify.halluc.desc": "When chunk-RAG retrieved the wrong section, the model didn't say \"I don't know\" — it generated <strong>plausible-sounding lies</strong>:",
"verify.halluc.examples": "<div><span style=\"color:var(--accent2)\">Q:</span> Who is the CTO?</div><div><span style=\"color:var(--red)\">Chunk-RAG:</span> \"John Smith\"   <span style=\"color:var(--text3)\">→ truth: Maria Santos</span></div><br><div><span style=\"color:var(--accent2)\">Q:</span> What is the revenue?</div><div><span style=\"color:var(--red)\">Chunk-RAG:</span> \"$1,000,000\"   <span style=\"color:var(--text3)\">→ truth: 847 million</span></div><br><div><span style=\"color:var(--accent2)\">Q:</span> What percent is R&D?</div><div><span style=\"color:var(--red)\">Chunk-RAG:</span> \"15% of net income\"   <span style=\"color:var(--text3)\">→ truth: 14% of revenue</span></div>",
"verify.halluc.summary": "This is the fundamental danger of chunk-RAG: <strong>retrieval failure becomes silent hallucination</strong>. KV compression makes it possible to load the entire document into context, eliminating this failure mode on consumer hardware.",
"verify.card1.t": "KV Compression = Zero Quality Loss",
"verify.card1.d": "FP32 7/7 = 6.4x compressed 7/7. The 6.4x memory savings cost nothing in fact extraction quality.",
"verify.card2.t": "Multi-Hop Reasoning Works",
"verify.card2.d": "\"What risk affects the growth region?\" requires linking Section 3 (Asia growth) with Section 5 (Asia currency risk). Full-doc: ✓. Chunk-RAG: impossible.",
"verify.card3.t": "Runs on 16GB Mac",
"verify.card3.d": "Llama 3.2 3B Q8_0, no GPU. 6.4x KV compression makes this practical on consumer hardware.",
"verify.cta": "Read the Beyond RAG Manifesto →",
"footer.text": "quant.cpp · Apache 2.0 · <a href=\"https://github.com/quantumaikr/quant.cpp\">GitHub</a> · Made by <a href=\"https://github.com/quantumaikr\">quantumaikr</a>"
},
ko: {
"nav.problem": "\uBB38\uC81C\uC810",
"nav.solution": "\uD575\uC2EC \uBC1C\uACAC",
"nav.techniques": "4\uAC00\uC9C0 \uAE30\uC220",
"nav.benchmarks": "\uBCA4\uCE58\uB9C8\uD06C",
"nav.papers": "\uB17C\uBB38",
"nav.glossary": "\uC6A9\uC5B4\uC9D1",
"hero.title": "AI\uC758 \uAE30\uC5B5\uB825,<br><span class=\"highlight\">6.4\uBC30 \uC555\uCD95</span>",
"hero.subtitle": "KV \uCE90\uC2DC \uC555\uCD95\uC5D0 \uB300\uD55C \uAD50\uC721 \uAC00\uC774\uB4DC \u2014 \uB178\uD2B8\uBD81\uC5D0\uC11C \uC7A5\uBB38 AI\uB97C \uC2E4\uD589\uD558\uB294 \uD575\uC2EC \uAE30\uC220.",
"hero.stat.compression": "KV \uC555\uCD95",
"hero.stat.quality": "\uD488\uC9C8 \uBE44\uC6A9",
"hero.stat.attention": "Attention \uAC00\uC18D",
"hero.stat.loc": "C \uCF54\uB4DC \uC904\uC218",
"toc.label": "\uAC00\uC774\uB4DC",
"toc.title": "\uBAA9\uCC28",
"toc.item1.title": "\uBA54\uBAA8\uB9AC \uBB38\uC81C",
"toc.item1.desc": "KV \uCE90\uC2DC\uAC00 AI \uCD94\uB860\uC758 \uC9C4\uC9DC \uBCD1\uBAA9\uC778 \uC774\uC720",
"toc.item2.title": "KV \uCE90\uC2DC\uB780?",
"toc.item2.desc": "\uD2B8\uB79C\uC2A4\uD3EC\uBA38\uAC00 \uBB38\uB9E5\uC744 \uAE30\uC5B5\uD558\uB294 \uBC29\uBC95\uACFC \uADF8 \uBE44\uC6A9",
"toc.item3.title": "\uD575\uC2EC \uBC1C\uACAC",
"toc.item3.desc": "\uBAA8\uB4E0 \uAE30\uC5B5\uC774 \uB611\uAC19\uC774 \uC911\uC694\uD55C \uAC83\uC740 \uC544\uB2C8\uB2E4",
"toc.item4.title": "4\uAC00\uC9C0 \uC555\uCD95 \uAE30\uC220",
"toc.item4.desc": "Progressive, K/V Asymmetry, H2O Eviction, PyramidKV",
"toc.item5.title": "\uBCA4\uCE58\uB9C8\uD06C",
"toc.item5.desc": "Llama 3.2 1B\uC640 Qwen3.5\uC5D0\uC11C\uC758 \uCE21\uC815 \uACB0\uACFC",
"toc.item6.title": "\uC5F0\uAD6C \uB17C\uBB38",
"toc.item6.desc": "\uAC01 \uAE30\uC220\uC758 \uD559\uC220\uC801 \uAE30\uBC18",
"ch1.label": "1\uC7A5",
"ch1.title": "AI\uC758 \uC9C4\uC9DC \uBCD1\uBAA9",
"ch1.intro": "AI\uC640 \uB300\uD654\uD560 \uB54C, AI\uB294 \uB2F9\uC2E0\uC774 \uB9D0\uD55C \uBAA8\uB4E0 \uAC83\uC744 \uAE30\uC5B5\uD574\uC57C \uD569\uB2C8\uB2E4. \uC774 \uAE30\uC5B5\uC744 <strong>KV \uCE90\uC2DC</strong>\uB77C\uACE0 \uD569\uB2C8\uB2E4. \uCDA9\uACA9\uC801\uC778 \uC0AC\uC2E4:",
"ch1.viz.title": "\uBA54\uBAA8\uB9AC \uC0AC\uC6A9\uB7C9: \uBAA8\uB378 vs KV \uCE90\uC2DC",
"ch1.viz.model": "AI \uBAA8\uB378 (Llama 3.2 3B)",
"ch1.viz.kv": "KV \uCE90\uC2DC (32K \uBB38\uB9E5, FP16)",
"ch1.viz.kv.bar": "8.0 GB \u2014 \uBAA8\uB378\uBCF4\uB2E4 \uD06C\uB2E4!",
"ch1.viz.compressed": "quant.cpp \uC801\uC6A9 KV \uCE90\uC2DC (6.4x)",
"ch1.explanation": "KV \uCE90\uC2DC\uB294 \uB300\uD654\uC758 \uBAA8\uB4E0 \uD1A0\uD070\uACFC \uD568\uAED8 \uC131\uC7A5\uD569\uB2C8\uB2E4. 32K \uBB38\uB9E5\uC5D0\uC11C\uB294 <strong>\uBAA8\uB378 \uC790\uCCB4\uBCF4\uB2E4 2\uBC30 \uB354 \uD07D\uB2C8\uB2E4</strong>. \uAE34 \uB300\uD654 \uC911 \uB178\uD2B8\uBD81 \uBA54\uBAA8\uB9AC\uAC00 \uBD80\uC871\uD574\uC9C0\uB294 \uC774\uC720\uB294 \uBAA8\uB378\uC774 \uB108\uBB34 \uCEE4\uC11C\uAC00 \uC544\uB2C8\uB77C, \uBAA8\uB378\uC758 <em>\uAE30\uC5B5</em>\uC774 \uB108\uBB34 \uD06C\uAE30 \uB54C\uBB38\uC785\uB2C8\uB2E4.",
"ch2.label": "2\uC7A5",
"ch2.title": "KV \uCE90\uC2DC\uB780?",
"ch2.intro": "Transformer\uC5D0\uC11C \uBAA8\uB4E0 \uD1A0\uD070\uC740 \uC774\uC804\uC758 \uBAA8\uB4E0 \uD1A0\uD070\uC5D0 \"\uC8FC\uBAA9(attend)\"\uD569\uB2C8\uB2E4. \uC774\uB97C \uC704\uD574 \uAC01 \uD1A0\uD070\uC740 <strong>Key</strong>(\uB098\uB294 \uBB34\uC5C7\uC778\uAC00?)\uC640 <strong>Value</strong>(\uB098\uC758 \uB0B4\uC6A9\uC740?)\uB97C \uC0DD\uC131\uD569\uB2C8\uB2E4. \uC774\uAC83\uB4E4\uC740 \uBBF8\uB798 \uD1A0\uD070\uC774 \uCC38\uC870\uD560 \uC218 \uC788\uB3C4\uB85D \uC800\uC7A5\uB429\uB2C8\uB2E4.",
"ch2.viz.title": "Attention \uC791\uB3D9 \uBC29\uC2DD (\uAC04\uB2E8 \uBC84\uC804)",
"ch2.viz.query": "\uD604\uC7AC \uD1A0\uD070\uC774 \uC0DD\uC131: <strong style=\"color:var(--accent2)\">Query</strong> = \"\uB098\uB294 \uBB34\uC5C7\uC744 \uCC3E\uACE0 \uC788\uB294\uAC00?\"",
"ch2.viz.kv": "\uAC01 \uACFC\uAC70 \uD1A0\uD070\uC774 \uC800\uC7A5: <strong style=\"color:var(--orange)\">Key</strong> = \"\uB098\uB294 \uC774\uAC83\uC774\uB2E4\" <strong style=\"color:var(--green)\">Value</strong> = \"\uB098\uC758 \uB0B4\uC6A9\uC740 \uC774\uAC83\uC774\uB2E4\"",
"ch2.viz.formula": "<strong style=\"color:var(--accent3)\">Attention</strong> = softmax(<strong style=\"color:var(--accent2)\">Q</strong> × <strong style=\"color:var(--orange)\">K</strong><sup>T</sup>) × <strong style=\"color:var(--green)\">V</strong>",
"ch2.viz.summary": "\u2192 \"\uBAA8\uB4E0 \uACFC\uAC70 \uD1A0\uD070\uC744 \uBCF4\uACE0, \uAD00\uB828 \uC788\uB294 \uAC83\uC5D0 \uC9D1\uC911\uD558\uACE0, \uADF8 \uAC12\uB4E4\uC744 \uD63C\uD569\"",
"ch2.expensive.title": "\uC65C \uBE44\uC2FC\uAC00?",