Skip to content

Commit 1e1a1a0

Browse files
author
Elwardi
committed
fix: robust + multi-fidelity play well with each other
1 parent 0ebaf9d commit 1e1a1a0

13 files changed

Lines changed: 756 additions & 17 deletions

src/foambo/api_server.py

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1889,6 +1889,35 @@ def get_status():
18891889
"context_samples": rc.get("context_samples", 10),
18901890
}
18911891

1892+
# Multi-fidelity info
1893+
mf_info = None
1894+
if raw_cfg:
1895+
params = raw_cfg.get("experiment", {}).get("parameters", [])
1896+
fid_params = [p for p in params if (p.get("is_fidelity") if hasattr(p, "get") else getattr(p, "is_fidelity", False))]
1897+
if fid_params:
1898+
fp = fid_params[0]
1899+
fid_name = fp.get("name", "") if hasattr(fp, "get") else getattr(fp, "name", "")
1900+
fid_target = fp.get("target_value") if hasattr(fp, "get") else getattr(fp, "target_value", None)
1901+
fid_values = fp.get("values") if hasattr(fp, "get") else getattr(fp, "values", None)
1902+
cost_metrics = [m.get("name", "") if hasattr(m, "get") else getattr(m, "name", "")
1903+
for m in raw_cfg.get("optimization", {}).get("metrics", [])
1904+
if (m.get("is_cost") if hasattr(m, "get") else getattr(m, "is_cost", False))]
1905+
# Count trials per fidelity level
1906+
fid_counts = {}
1907+
if client is not None:
1908+
for t in client._experiment.trials.values():
1909+
if t.arm:
1910+
fv = t.arm.parameters.get(fid_name)
1911+
if fv is not None:
1912+
fid_counts[str(fv)] = fid_counts.get(str(fv), 0) + 1
1913+
mf_info = {
1914+
"fidelity_param": fid_name,
1915+
"target_value": fid_target,
1916+
"levels": fid_values or [],
1917+
"cost_metric": cost_metrics[0] if cost_metrics else None,
1918+
"trials_per_fidelity": fid_counts,
1919+
}
1920+
18921921
return SafeJSONResponse(content={
18931922
"running": True,
18941923
"uptime_s": round(time.time() - _state.start_time, 1),
@@ -1899,6 +1928,7 @@ def get_status():
18991928
"model_fitted": has_model,
19001929
"timing": getattr(_state, '_timing', None),
19011930
"robust": robust_info,
1931+
"multi_fidelity": mf_info,
19021932
})
19031933

19041934

src/foambo/common.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -321,9 +321,9 @@ def prepare_case(
321321
if "files" in cfg['templating'].keys() and cfg['templating']['files']:
322322
for entry in cfg['templating']['files']:
323323
param_name = entry['parameter']
324-
template_path = entry['file_path']
324+
template_path = entry['file_path'].lstrip('/')
325325
if param_name in parameters:
326-
shutil.copyfile(
326+
shutil.copy2(
327327
os.path.join(case.path, template_path + "." + parameters[param_name]),
328328
os.path.join(case.path, template_path)
329329
)

src/foambo/default_config.py

Lines changed: 108 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -852,6 +852,114 @@ def __init__(self, **kwargs):
852852
bootstrap loader clears that flag automatically before mutating.
853853
""").strip(),
854854
}
855+
harvested["experiment.parameters[].is_fidelity"] = {
856+
"category": "Config",
857+
"content": textwrap.dedent("""
858+
Mark a parameter as the fidelity dimension for multi-fidelity BO.
859+
The parameter represents evaluation quality — e.g. 0 = cheap
860+
meanline, 1 = expensive CFD. Exactly one parameter should have
861+
this flag. ``target_value`` is the high-fidelity level the
862+
optimizer ultimately cares about.
863+
864+
```yaml
865+
experiment:
866+
parameters:
867+
- name: fidelity
868+
parameter_type: float
869+
bounds: [0.0, 1.0]
870+
is_fidelity: true
871+
target_value: 1.0
872+
```
873+
874+
When detected, foamBO auto-selects ``SingleTaskMultiFidelityGP``
875+
as the surrogate and extracts ``target_fidelities`` for the
876+
acquisition function. The runner should branch on the fidelity
877+
value to dispatch cheap vs expensive evaluations.
878+
""").strip(),
879+
}
880+
881+
harvested["optimization.metrics[].is_cost"] = {
882+
"category": "Config",
883+
"content": textwrap.dedent("""
884+
Mark a metric as the cost signal for multi-fidelity acquisition.
885+
The metric value should represent actual execution cost (e.g.
886+
wall-clock seconds) and must be emitted by the runner at every
887+
fidelity level.
888+
889+
```yaml
890+
optimization:
891+
metrics:
892+
- name: executionTime
893+
command: ["scripts/metric.sh", "executionTime"]
894+
is_cost: true
895+
```
896+
897+
Exactly one metric should have ``is_cost: true``. foamBO learns
898+
per-fidelity mean cost from observed values and updates the
899+
acquisition function's cost model each callback cycle. Without
900+
an ``is_cost`` metric, MF acquisition uses uniform cost
901+
(no cost-aware fidelity selection).
902+
""").strip(),
903+
}
904+
905+
harvested["trial_generation.generation_nodes[].generator_specs[].model_kwargs.botorch_acqf_class"] = {
906+
"category": "Config",
907+
"content": textwrap.dedent("""
908+
Override the default BoTorch acquisition function class. Accepts
909+
a string class name that foamBO resolves to the actual Python
910+
class. Supported MF acquisition functions:
911+
912+
``qMultiFidelityHypervolumeKnowledgeGradient`` (recommended):
913+
- Multi-objective, cost-aware, one-step lookahead.
914+
- Maximizes expected hypervolume improvement at target fidelity.
915+
- Cost model auto-wired from ``is_cost`` metric via
916+
``cost_intercept`` + ``fidelity_weights``.
917+
918+
``MOMF`` (alternative):
919+
- Multi-objective multi-fidelity via fidelity pseudo-objective.
920+
- Faster candidate generation, less sample efficient.
921+
- Requires manual ``cost_call`` in ``botorch_acqf_options``.
922+
923+
```yaml
924+
trial_generation:
925+
method: custom
926+
generation_nodes:
927+
- node_name: MF
928+
generator_specs:
929+
- generator_enum: BOTORCH_MODULAR
930+
model_kwargs:
931+
botorch_acqf_class: "qMultiFidelityHypervolumeKnowledgeGradient"
932+
```
933+
934+
With ``method: fast``, setting ``botorch_acqf_class`` is not
935+
needed — foamBO auto-selects ``qMultiFidelityHypervolumeKnowledgeGradient``
936+
when an ``is_fidelity`` parameter is detected. Custom generation
937+
nodes are only needed to override the default (e.g. pick MOMF).
938+
939+
**Runner dispatch (recommended):** use ``file_substitution`` with
940+
a string ``ChoiceParameter`` fidelity. Place ``Allrun.meanline``
941+
+ ``Allrun.CFD`` in the template case:
942+
```yaml
943+
experiment:
944+
parameters:
945+
- name: fidelity
946+
values: ["meanline", "CFD"]
947+
parameter_type: str
948+
is_fidelity: true
949+
target_value: "CFD"
950+
951+
optimization:
952+
case_runner:
953+
file_substitution:
954+
- parameter: fidelity
955+
file_path: /Allrun
956+
```
957+
958+
Other resolvable names: ``qMultiFidelityKnowledgeGradient``
959+
(single-objective MF), ``qMultiFidelityMaxValueEntropy``.
960+
""").strip(),
961+
}
962+
855963
harvested["dashboard"] = {
856964
"category": "Dashboard",
857965
"content": textwrap.dedent("""

src/foambo/docs_concepts.py

Lines changed: 153 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -624,4 +624,157 @@
624624
it does not (or only changes via ``specialize``).""",
625625
},
626626

627+
"concept.multi_fidelity": {
628+
"category": "Concept",
629+
"content": """\
630+
Multi-fidelity optimization — cheap approximations guide expensive evaluations.
631+
632+
When a fast surrogate (e.g. coarse mesh) correlates with the
633+
expensive truth (fine-mesh CFD), multi-fidelity BO spends most of its budget
634+
on cheap queries and reserves expensive evaluations for the most promising
635+
candidates. The GP learns the bias between fidelity levels and the
636+
acquisition function trades information gain against evaluation cost.
637+
638+
**Setup in foamBO:**
639+
640+
1. Add a fidelity parameter with ``is_fidelity: true`` and ``target_value``:
641+
```yaml
642+
experiment:
643+
parameters:
644+
- name: fidelity
645+
parameter_type: str
646+
values: ["coarse", "fine"]
647+
is_fidelity: true
648+
target_value: "fine"
649+
```
650+
651+
2. Mark one metric as the cost signal with ``is_cost: true``:
652+
```yaml
653+
optimization:
654+
metrics:
655+
- name: executionTime
656+
command: ["scripts/metric.sh", "executionTime"]
657+
is_cost: true
658+
```
659+
660+
3. Use ``method: fast`` (or any generation strategy — MF is auto-wired):
661+
```yaml
662+
trial_generation:
663+
method: fast
664+
```
665+
666+
That's it. No custom generation nodes required for MF.
667+
668+
**Auto-wiring:** when ``is_fidelity`` is detected, foamBO automatically:
669+
- Selects ``qMultiFidelityHypervolumeKnowledgeGradient`` as the acqf.
670+
- Sets ``SingleTaskMultiFidelityGP`` as the surrogate.
671+
- Extracts ``target_fidelities`` from the search space (Ax built-in).
672+
- Learns ``cost_intercept`` and ``fidelity_weights`` from observed
673+
``is_cost`` metric data each callback cycle.
674+
675+
To override the acqf (e.g. use MOMF instead), use a custom generation node:
676+
```yaml
677+
trial_generation:
678+
method: custom
679+
generation_nodes:
680+
- node_name: MF
681+
generator_specs:
682+
- generator_enum: BOTORCH_MODULAR
683+
model_kwargs:
684+
botorch_acqf_class: "MOMF"
685+
```
686+
687+
**Two acquisition functions are supported:**
688+
689+
``qMultiFidelityHypervolumeKnowledgeGradient`` (qMF-HVKG):
690+
- Multi-objective, cost-aware, one-step lookahead.
691+
- Maximizes expected hypervolume improvement at target fidelity.
692+
- Better sample efficiency; slower candidate generation.
693+
- Takes ``cost_intercept`` + ``fidelity_weights`` via input constructor;
694+
foamBO auto-derives these from the ``is_cost`` metric.
695+
- **Recommended for most use cases** (trial cost dominates gen time).
696+
697+
``MOMF`` (Multi-Objective Multi-Fidelity):
698+
- Adds fidelity as a pseudo-objective (trust reward for higher fidelity).
699+
- Faster candidate generation; less sample efficient.
700+
- Takes ``cost_call`` (a callable) directly.
701+
- foamBO does NOT auto-wire cost for MOMF; pass ``cost_call`` manually
702+
via ``botorch_acqf_options`` if needed.
703+
704+
**Runner dispatch** via ``file_substitution`` (recommended):
705+
Use a string ``ChoiceParameter`` for fidelity and foamBO's built-in file
706+
substitution to swap the runner script (or a portion of it) per fidelity level. Place
707+
``Allrun.coarse`` and ``Allrun.fine`` in the template case:
708+
```yaml
709+
optimization:
710+
case_runner:
711+
file_substitution:
712+
- parameter: fidelity
713+
file_path: /Allrun
714+
```
715+
When ``fidelity=coarse``, the runner copies ``Allrun.coarse`` →
716+
``Allrun`` before execution. When ``fidelity=fine``, copies
717+
``Allrun.fine`` → ``Allrun``. No if/else branching in scripts needed.
718+
719+
**Alternative** (continuous fidelity via env var):
720+
```bash
721+
if [ "$FIDELITY" = "0" ] || [ "$FIDELITY" = "0.0" ]; then
722+
./Allrun.coarse
723+
else
724+
./Allrun.fine
725+
fi
726+
```
727+
728+
**Cost model evolution:** before any trials complete, uniform cost is
729+
assumed (cost ratio = 1). As ``is_cost`` data arrives, foamBO recomputes
730+
per-fidelity mean cost and updates ``cost_intercept`` / ``fidelity_weights``
731+
each callback — no restart needed.
732+
733+
**Cost scaling warning:** the ``is_cost`` metric should emit *scaled*
734+
costs, not raw wall-clock seconds. With extreme cost ratios (e.g. coarse
735+
1s vs fine 3600s = 1:3600), the acquisition function may defer expensive
736+
evaluations indefinitely — the info-gain-per-cost ratio always favors
737+
cheap queries when the denominator is 3600× larger.
738+
739+
**Recommendation:** cap the effective ratio to 1:50–1:100 by emitting
740+
a normalized cost. Use the baseline trial's execution time as the
741+
reference scale:
742+
743+
- Coarse fidelity metric script: ``echo 1``
744+
- Fine fidelity metric script: ``echo 50`` (not the raw wall time)
745+
746+
This tells the optimizer "fine is 50× more expensive" — enough to prefer
747+
coarse for exploration, but not so extreme that fine is never selected.
748+
Tune the ratio based on how many fine-fidelity evaluations you can afford
749+
in your budget. A ratio of 1:N means roughly 1 fine trial per N coarse
750+
trials.
751+
752+
Also consider seeding 3–5 initial trials at target fidelity (via SOBOL
753+
init phase with ``fixed_features``) so the GP has fine-fidelity signal
754+
from the start to estimate the coarse→fine bias.
755+
756+
**Composition with robust mode:** fully automatic. When both
757+
``is_fidelity`` and ``robust_optimization`` are present, foamBO composes
758+
them into a single acquisition loop:
759+
760+
- **Surrogate**: ``SingleTaskMultiFidelityGP`` (handles fidelity kernel)
761+
with ``SubstituteContextFeatures`` (handles context fan-out) as chained
762+
input transforms on the same model.
763+
- **Acquisition**: ``qMultiFidelityHypervolumeKnowledgeGradient`` with a
764+
``RobustMCObjective`` (MARS or CVaR) passed as the ``objective``.
765+
- **Per-candidate evaluation**: qMFHVKG proposes ``(design, fidelity)``,
766+
the GP posterior fans to K context points via SubstituteContextFeatures,
767+
the risk-measure objective reduces K contexts to risk-adjusted values,
768+
and qMFHVKG computes cost-aware HV improvement at target fidelity.
769+
770+
No extra YAML is needed — setting ``is_fidelity`` on a parameter alongside
771+
``robust_optimization`` triggers composition automatically. The cost model
772+
(``is_cost`` metric) works identically in the composed path.
773+
774+
**Staged fallback** is still available for simpler workflows: MF BO at
775+
nominal context first, then robust verification via ``bootstrap``.
776+
777+
See also: ``concept.robust_optimization``, ``concept.bootstrap_and_specialize``.""",
778+
},
779+
627780
}

src/foambo/metrics.py

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -315,6 +315,14 @@ class LocalJobMetric(FoamBOBaseModel):
315315
"Use this when optimization parameters affect simulation speed (e.g. AMR/mesh refinement), "
316316
"so trials are compared at the same physical time rather than the same poll count."
317317
))
318+
is_cost: bool = Field(default=False, description=(
319+
"Mark this metric as the cost signal for multi-fidelity acquisition (MOMF). "
320+
"Exactly one metric must have is_cost=True when using a cost-aware MF acqf. "
321+
"The metric value should represent actual execution cost (e.g. wall-clock "
322+
"seconds) and must be emitted at every fidelity level. foamBO builds a "
323+
"per-fidelity mean-cost lookup from observed values and passes it as "
324+
"cost_call to the acquisition function."
325+
))
318326

319327
def to_metric(self):
320328
cfg = DictConfig({
@@ -419,17 +427,18 @@ def streaming_metric(client: Client, opt_cfg: Dict):
419427
metric["progress"]
420428
.replace("$FOAMBO_STEP", str(trial_progression_step[trial_idx][metric["name"]]))
421429
.replace("$STEP", str(trial_progression_step[trial_idx][metric["name"]]))
422-
if "progress" in metric and isinstance(metric["progress"], str)
430+
if "progress" in metric and isinstance(metric.get("progress"), str)
423431
else [
424432
p.replace("$FOAMBO_STEP", str(trial_progression_step[trial_idx][metric["name"]]))
425433
.replace("$STEP", str(trial_progression_step[trial_idx][metric["name"]]))
426434
if isinstance(p, str) else p
427-
for p in metric["progress"]
435+
for p in metric.get("progress") or []
428436
]
429437
)
430438
}
431439
for metric in metrics_cfg
432440
if metric["name"] not in objective_names
441+
and metric.get("progress") and metric.get("progress") != [] and metric.get("progress") != ""
433442
]
434443
# If we have no eligible progress items (command or callable), skip this
435444
has_progress_cmd = any(cfg.get("progress") and cfg["progress"] != "" and cfg["progress"] != "none"

0 commit comments

Comments
 (0)