Skip to content

Commit f364143

Browse files
authored
Merge pull request #16 from Sankhya-AI/alpha
V2.2.2
2 parents e7e948b + c0245c1 commit f364143

22 files changed

Lines changed: 163 additions & 98 deletions

CHANGELOG.md

Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,46 @@ All notable changes to this project will be documented in this file.
44

55
The format is based on [Keep a Changelog](https://keepachangelog.com/), and this project adheres to [Semantic Versioning](https://semver.org/).
66

7+
## [2.2.0b1] - 2026-03-31 — Architectural Cleanup
8+
9+
Beta release focused on internal discipline rather than new features.
10+
11+
### Changed — Architecture
12+
13+
- **main.py decomposition**: Extracted `SearchPipeline`, `MemoryWritePipeline`, `OrchestrationEngine` from the 6,129-line monolith. main.py is now ~3,100 lines (49% reduction).
14+
- **Public surface**: `dhee/__init__.py` rewritten for clean, narrow exports. `Memory = CoreMemory` (not FullMemory). Cognitive subsystems intentionally kept internal.
15+
- **MCP split**: `mcp_slim.py` (4-tool product surface) vs `mcp_server.py` (24-tool power surface). Clear separation of concerns.
16+
17+
### Changed — Rename Debt
18+
19+
- All `FADEM_*` env vars → `DHEE_*` (with `FADEM_*` fallback for backward compat).
20+
- Internal `fadem_config` → `fade_config` across memory package.
21+
- Default collection name `fadem_memories` → `dhee_memories`.
22+
- Config field `MemoryConfig.engram` → `MemoryConfig.fade`.
23+
- CLI, MCP server, observability, presets: all `engram` product references removed.
24+
25+
### Added — D2Skill Policy Improvements
26+
27+
- **Dual-granularity policies**: `PolicyGranularity.TASK` (strategy) vs `PolicyGranularity.STEP` (local correction). Inspired by D2Skill (arXiv:2603.28716).
28+
- **Utility scoring**: EMA-smoothed performance delta tracking on policies. Three-signal retrieval ranking (condition match + sigmoid utility + UCB exploration bonus).
29+
- **Utility-based pruning**: `PolicyStore.prune()` removes deprecated policies first, protects validated ones.
30+
- **Buddhi wiring**: `reflect()` accepts `outcome_score`, computes baseline vs actual delta, feeds it to policy `record_outcome()`.
31+
32+
### Fixed
33+
34+
- `buddhi.py`: Replaced dead `memory.get_last_session_digest()` call with working `get_last_session()` import.
35+
- `mcp_server.py`: Fixed wrong "8 tools total" comment (actually 24), fixed `-> Memory` type hints (Memory not imported).
36+
- CLI: Wired `benchmark` command into parser (existed but was unreachable).
37+
- `PolicyStore.prune()`: Fixed bug where `candidates.pop()` pruned validated policies instead of deprecated ones.
38+
- CHANGELOG 2.1.0: Removed false "Production/Stable" and "A-grade" claims.
39+
40+
### Changed — Packaging
41+
42+
- Version: 2.1.0 → 2.2.0b1
43+
- Classifier: `Development Status :: 4 - Beta` (was falsely claiming Production/Stable)
44+
45+
---
46+
747
## [2.1.0] - 2026-03-30 — Cognition Primitives
848

949
Dhee V2.1: Adds first-class cognitive primitives (episodes, tasks, policies, beliefs, triggers) and a 60-test suite that exercises them. These are internal building blocks — the public API remains the 4-operation surface (remember/recall/context/checkpoint).

dhee/cli.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
dhee categories List categories
1111
dhee export Export to JSON
1212
dhee import <file> Import from JSON
13+
dhee benchmark Run performance benchmarks
1314
dhee status Version, config, DB info
1415
"""
1516

@@ -457,6 +458,9 @@ def build_parser() -> argparse.ArgumentParser:
457458
p_status = sub.add_parser("status", help="Show version, config, and agents")
458459
p_status.add_argument("--json", action="store_true", help="JSON output")
459460

461+
# benchmark
462+
sub.add_parser("benchmark", help="Run performance benchmarks")
463+
460464
# uninstall
461465
sub.add_parser("uninstall", help="Remove ~/.dhee directory")
462466

@@ -477,6 +481,7 @@ def build_parser() -> argparse.ArgumentParser:
477481
"export": cmd_export,
478482
"import": cmd_import,
479483
"status": cmd_status,
484+
"benchmark": cmd_benchmark,
480485
"uninstall": cmd_uninstall,
481486
}
482487

dhee/cli_config.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -125,15 +125,15 @@ def get_memory_instance(config: Optional[Dict[str, Any]] = None):
125125
provider="sqlite_vec",
126126
config={
127127
"path": vec_db_path,
128-
"collection_name": "fadem_memories",
128+
"collection_name": "dhee_memories",
129129
"embedding_model_dims": embedding_dims,
130130
},
131131
),
132132
llm=LLMConfig(provider=provider, config=llm_cfg),
133133
embedder=EmbedderConfig(provider=provider, config=embedder_cfg),
134134
history_db_path=history_db_path,
135135
embedding_model_dims=embedding_dims,
136-
engram=FadeMemConfig(enable_forgetting=True),
136+
fade=FadeMemConfig(enable_forgetting=True),
137137
)
138138

139139
return FullMemory(memory_config)

dhee/configs/base.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ class VectorStoreConfig(BaseModel):
2424
config: Dict[str, Any] = Field(
2525
default_factory=lambda: {
2626
"path": os.path.join(_dhee_data_dir(), "zvec"),
27-
"collection_name": "fadem_memories",
27+
"collection_name": "dhee_memories",
2828
}
2929
)
3030

@@ -605,15 +605,15 @@ class MemoryConfig(BaseModel):
605605
history_db_path: str = Field(
606606
default_factory=lambda: os.path.join(_dhee_data_dir(), "history.db")
607607
)
608-
collection_name: str = "fadem_memories"
608+
collection_name: str = "dhee_memories"
609609
embedding_model_dims: int = 4096 # nvidia/nv-embed-v1 default dimensions
610610
version: str = "v1.4" # Updated for CLS Distillation Memory
611611
custom_fact_extraction_prompt: Optional[str] = None
612612
custom_conflict_prompt: Optional[str] = None
613613
custom_fusion_prompt: Optional[str] = None
614614
custom_echo_prompt: Optional[str] = None
615615
custom_category_prompt: Optional[str] = None
616-
engram: FadeMemConfig = Field(default_factory=FadeMemConfig)
616+
fade: FadeMemConfig = Field(default_factory=FadeMemConfig)
617617
echo: EchoMemConfig = Field(default_factory=EchoMemConfig)
618618
category: CategoryMemConfig = Field(default_factory=CategoryMemConfig)
619619
scope: ScopeConfig = Field(default_factory=ScopeConfig)

dhee/configs/presets.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ def minimal_config():
4545
history_db_path=os.path.join(data_dir, "history.db"),
4646
collection_name="dhee_memories",
4747
embedding_model_dims=384,
48-
engram=FadeMemConfig(enable_forgetting=True),
48+
fade=FadeMemConfig(enable_forgetting=True),
4949
echo=EchoMemConfig(enable_echo=False),
5050
category=CategoryMemConfig(enable_categories=False),
5151
graph=KnowledgeGraphConfig(enable_graph=False),
@@ -122,7 +122,7 @@ def smart_config():
122122
history_db_path=os.path.join(data_dir, "history.db"),
123123
collection_name="dhee_memories",
124124
embedding_model_dims=dims,
125-
engram=FadeMemConfig(enable_forgetting=True),
125+
fade=FadeMemConfig(enable_forgetting=True),
126126
echo=EchoMemConfig(enable_echo=has_llm),
127127
category=CategoryMemConfig(enable_categories=has_llm),
128128
graph=KnowledgeGraphConfig(enable_graph=True, use_llm_extraction=False),

dhee/core/forgetting.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -29,14 +29,14 @@ def __init__(
2929
self,
3030
db: "SQLiteManager",
3131
config: "DistillationConfig",
32-
fadem_config: "FadeMemConfig",
32+
fade_config: "FadeMemConfig",
3333
resolve_conflict_fn=None,
3434
search_fn=None,
3535
llm=None,
3636
):
3737
self.db = db
3838
self.config = config
39-
self.fadem_config = fadem_config
39+
self.fade_config = fade_config
4040
self.resolve_conflict_fn = resolve_conflict_fn
4141
self.search_fn = search_fn
4242
self.llm = llm
@@ -90,7 +90,7 @@ def run(
9090
nearest = neighbors[0]
9191
similarity = float(nearest.score)
9292

93-
if similarity < self.fadem_config.conflict_similarity_threshold:
93+
if similarity < self.fade_config.conflict_similarity_threshold:
9494
continue
9595

9696
# Fetch the neighbor memory from DB
@@ -319,12 +319,12 @@ def __init__(
319319
self,
320320
db: "SQLiteManager",
321321
config: "DistillationConfig",
322-
fadem_config: "FadeMemConfig",
322+
fade_config: "FadeMemConfig",
323323
delete_fn=None,
324324
):
325325
self.db = db
326326
self.config = config
327-
self.fadem_config = fadem_config
327+
self.fade_config = fade_config
328328
self.delete_fn = delete_fn
329329

330330
def run(
@@ -373,7 +373,7 @@ def run(
373373
pressure = strength * pressure_factor * excess_ratio
374374
new_strength = max(0.0, strength - pressure)
375375

376-
if new_strength < self.fadem_config.forgetting_threshold:
376+
if new_strength < self.fade_config.forgetting_threshold:
377377
if self.delete_fn:
378378
try:
379379
self.delete_fn(memory["id"])

dhee/mcp_server.py

Lines changed: 26 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@ def _get_embedding_dims_for_model(model: str, provider: str) -> int:
5757
"text-embedding-3-large": 3072,
5858
"text-embedding-ada-002": 1536,
5959
}
60-
env_dims = os.environ.get("FADEM_EMBEDDING_DIMS")
60+
env_dims = os.environ.get("DHEE_EMBEDDING_DIMS") or os.environ.get("FADEM_EMBEDDING_DIMS")
6161
if env_dims:
6262
return int(env_dims)
6363
if model in EMBEDDING_DIMS:
@@ -69,8 +69,8 @@ def _get_embedding_dims_for_model(model: str, provider: str) -> int:
6969
return 3072
7070

7171

72-
def get_memory_instance() -> Memory:
73-
"""Create and return a configured Memory instance (FullMemory for MCP)."""
72+
def get_memory_instance() -> FullMemory:
73+
"""Create and return a configured FullMemory instance for the MCP server."""
7474
openai_key = os.environ.get("OPENAI_API_KEY")
7575
gemini_key = os.environ.get("GOOGLE_API_KEY") or os.environ.get("GEMINI_API_KEY")
7676
nvidia_key = (
@@ -79,13 +79,17 @@ def get_memory_instance() -> Memory:
7979
or os.environ.get("NVIDIA_EMBEDDING_API_KEY")
8080
)
8181

82+
def _env(key: str, default: str = "") -> str:
83+
"""Read DHEE_ env var with FADEM_ fallback for backward compat."""
84+
return os.environ.get(f"DHEE_{key}") or os.environ.get(f"FADEM_{key}") or default
85+
8286
if openai_key:
83-
embedder_model = os.environ.get("FADEM_EMBEDDER_MODEL", "text-embedding-3-small")
87+
embedder_model = _env("EMBEDDER_MODEL", "text-embedding-3-small")
8488
embedding_dims = _get_embedding_dims_for_model(embedder_model, "openai")
8589
llm_config = LLMConfig(
8690
provider="openai",
8791
config={
88-
"model": os.environ.get("FADEM_LLM_MODEL", "gpt-4o-mini"),
92+
"model": _env("LLM_MODEL", "gpt-4o-mini"),
8993
"temperature": 0.1, "max_tokens": 1024, "api_key": openai_key,
9094
}
9195
)
@@ -94,12 +98,12 @@ def get_memory_instance() -> Memory:
9498
config={"model": embedder_model, "api_key": openai_key},
9599
)
96100
elif gemini_key:
97-
embedder_model = os.environ.get("FADEM_EMBEDDER_MODEL", "gemini-embedding-001")
101+
embedder_model = _env("EMBEDDER_MODEL", "gemini-embedding-001")
98102
embedding_dims = _get_embedding_dims_for_model(embedder_model, "gemini")
99103
llm_config = LLMConfig(
100104
provider="gemini",
101105
config={
102-
"model": os.environ.get("FADEM_LLM_MODEL", "gemini-2.0-flash"),
106+
"model": _env("LLM_MODEL", "gemini-2.0-flash"),
103107
"temperature": 0.1, "max_tokens": 1024, "api_key": gemini_key,
104108
}
105109
)
@@ -108,13 +112,12 @@ def get_memory_instance() -> Memory:
108112
config={"model": embedder_model, "api_key": gemini_key},
109113
)
110114
elif nvidia_key:
111-
# Internal provider — not customer-documented
112-
embedder_model = os.environ.get("FADEM_EMBEDDER_MODEL", "nvidia/llama-nemotron-embed-vl-1b-v2")
115+
embedder_model = _env("EMBEDDER_MODEL", "nvidia/llama-nemotron-embed-vl-1b-v2")
113116
embedding_dims = 2048
114117
llm_config = LLMConfig(
115118
provider="nvidia",
116119
config={
117-
"model": os.environ.get("FADEM_LLM_MODEL", "qwen/qwen3.5-397b-a17b"),
120+
"model": _env("LLM_MODEL", "qwen/qwen3.5-397b-a17b"),
118121
"temperature": 0.2, "max_tokens": 4096, "api_key": nvidia_key,
119122
}
120123
)
@@ -131,17 +134,15 @@ def get_memory_instance() -> Memory:
131134
)
132135

133136
from dhee.configs.base import _dhee_data_dir
134-
vec_db_path = os.environ.get(
135-
"FADEM_VEC_DB_PATH",
136-
os.path.join(_dhee_data_dir(), "zvec"),
137-
)
137+
vec_db_path = _env("VEC_DB_PATH") or os.path.join(_dhee_data_dir(), "zvec")
138+
collection = _env("COLLECTION", "dhee_memories")
138139

139140
# Use in-memory vector store for simple embedder (no persistent storage needed)
140141
if embedder_config.provider == "simple":
141142
vector_store_config = VectorStoreConfig(
142143
provider="memory",
143144
config={
144-
"collection_name": os.environ.get("FADEM_COLLECTION", "fadem_memories"),
145+
"collection_name": collection,
145146
"embedding_model_dims": embedding_dims,
146147
},
147148
)
@@ -150,20 +151,17 @@ def get_memory_instance() -> Memory:
150151
provider="zvec",
151152
config={
152153
"path": vec_db_path,
153-
"collection_name": os.environ.get("FADEM_COLLECTION", "fadem_memories"),
154+
"collection_name": collection,
154155
"embedding_model_dims": embedding_dims,
155156
},
156157
)
157158

158-
history_db_path = os.environ.get(
159-
"FADEM_HISTORY_DB",
160-
os.path.join(_dhee_data_dir(), "history.db"),
161-
)
159+
history_db_path = _env("HISTORY_DB") or os.path.join(_dhee_data_dir(), "history.db")
162160

163-
fadem_config = FadeMemConfig(
164-
enable_forgetting=os.environ.get("FADEM_ENABLE_FORGETTING", "true").lower() == "true",
165-
sml_decay_rate=float(os.environ.get("FADEM_SML_DECAY_RATE", "0.15")),
166-
lml_decay_rate=float(os.environ.get("FADEM_LML_DECAY_RATE", "0.02")),
161+
fade_config = FadeMemConfig(
162+
enable_forgetting=_env("ENABLE_FORGETTING", "true").lower() == "true",
163+
sml_decay_rate=float(_env("SML_DECAY_RATE", "0.15")),
164+
lml_decay_rate=float(_env("LML_DECAY_RATE", "0.02")),
167165
)
168166

169167
config = MemoryConfig(
@@ -172,18 +170,18 @@ def get_memory_instance() -> Memory:
172170
embedder=embedder_config,
173171
history_db_path=history_db_path,
174172
embedding_model_dims=embedding_dims,
175-
engram=fadem_config,
173+
fade=fade_config,
176174
)
177175

178176
return FullMemory(config)
179177

180178

181179
# Global instances (lazy)
182-
_memory: Optional[Memory] = None
180+
_memory: Optional[FullMemory] = None
183181
_buddhi = None # type: ignore
184182

185183

186-
def get_memory() -> Memory:
184+
def get_memory() -> FullMemory:
187185
global _memory
188186
if _memory is None:
189187
_memory = get_memory_instance()
@@ -203,7 +201,7 @@ def get_buddhi():
203201

204202
server = Server("dhee")
205203

206-
# Tool definitions — 8 tools total
204+
# Tool definitions — 24 tools
207205
TOOLS = [
208206
Tool(
209207
name="remember",

dhee/memory/core.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,7 @@ def __init__(
6565
self.vector_store = VectorStoreFactory.create(
6666
self.config.vector_store.provider, self.config.vector_store.config
6767
)
68-
self.fadem_config = self.config.engram
68+
self.fade_config = self.config.fade
6969
self.distillation_config = getattr(self.config, "distillation", None)
7070

7171
# Query embedding LRU cache
@@ -115,7 +115,7 @@ def add(
115115
# Boost fast trace if multi-trace is enabled
116116
if self.distillation_config and self.distillation_config.enable_multi_trace:
117117
s_fast = existing.get("s_fast") or 0.0
118-
boosted = boost_fast_trace(s_fast, self.fadem_config.access_strength_boost)
118+
boosted = boost_fast_trace(s_fast, self.fade_config.access_strength_boost)
119119
self.db.update_memory(existing["id"], {"s_fast": boosted})
120120
return {
121121
"results": [{
@@ -162,7 +162,7 @@ def add(
162162
"confidentiality_scope": metadata.get("confidentiality_scope", "work"),
163163
"source_type": "mcp",
164164
"source_app": source_app,
165-
"decay_lambda": self.fadem_config.sml_decay_rate,
165+
"decay_lambda": self.fade_config.sml_decay_rate,
166166
"status": "active",
167167
"importance": metadata.get("importance", 0.5),
168168
"sensitivity": metadata.get("sensitivity", "normal"),
@@ -374,11 +374,11 @@ def apply_decay(
374374
last_accessed=mem.get("last_accessed", mem.get("created_at", "")),
375375
access_count=int(mem.get("access_count", 0)),
376376
layer=mem.get("layer", "sml"),
377-
config=self.fadem_config,
377+
config=self.fade_config,
378378
)
379379

380-
if should_forget(new_strength, self.fadem_config):
381-
if self.fadem_config.use_tombstone_deletion:
380+
if should_forget(new_strength, self.fade_config):
381+
if self.fade_config.use_tombstone_deletion:
382382
self.db.update_memory(mem["id"], {"tombstone": 1, "strength": new_strength})
383383
else:
384384
self.db.delete_memory(mem["id"])
@@ -391,7 +391,7 @@ def apply_decay(
391391
mem.get("layer", "sml"),
392392
int(mem.get("access_count", 0)),
393393
new_strength,
394-
self.fadem_config,
394+
self.fade_config,
395395
):
396396
self.db.update_memory(mem["id"], {"strength": new_strength, "layer": "lml"})
397397
promoted += 1

0 commit comments

Comments
 (0)