diff --git a/docs-mintlify/admin/deployment/oidc/aws.mdx b/docs-mintlify/admin/deployment/oidc/aws.mdx
index 23409ecf1b05d..141dd82a330c0 100644
--- a/docs-mintlify/admin/deployment/oidc/aws.mdx
+++ b/docs-mintlify/admin/deployment/oidc/aws.mdx
@@ -309,6 +309,21 @@ deployment's default identity is the simplest place to put this.
+
+<Note>
+OIDC only covers Cube's **read** side of the export bucket. The data
+warehouse itself (Snowflake, Redshift, Athena, …) runs the
+`UNLOAD` that writes objects to the bucket, and the warehouse cannot
+federate with Cube's OIDC issuer. You still need to provide **separate
+credentials for the `UNLOAD`** so the warehouse can write to S3 — typically
+an AWS access key pair or a warehouse-side storage integration / IAM role
+— via the standard export bucket env vars (e.g.
+`CUBEJS_DB_EXPORT_BUCKET_AWS_KEY` and `CUBEJS_DB_EXPORT_BUCKET_AWS_SECRET`,
+or the driver-specific storage-integration variables). OIDC then handles
+Cube's download of the unloaded objects from the bucket.
+</Note>
+
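+As a concrete sketch, a Redshift or Snowflake deployment unloading to S3 might
+combine the two halves like this (the bucket name and key values are
+placeholders; check your driver's export bucket page for the exact variable
+set):
+
+```sh
+# Write side: credentials the warehouse uses for the UNLOAD into S3
+CUBEJS_DB_EXPORT_BUCKET=my-cube-export-bucket
+CUBEJS_DB_EXPORT_BUCKET_AWS_KEY=AKIA...
+CUBEJS_DB_EXPORT_BUCKET_AWS_SECRET=<secret-access-key>
+
+# Read side: nothing extra to set; Cube downloads the unloaded objects
+# from the bucket with its OIDC workload identity.
+```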
+
## Cube Store CSPS bucket
Cube Store CSPS lets you store pre-aggregations in your own S3 bucket.
diff --git a/docs-mintlify/admin/deployment/oidc/azure.mdx b/docs-mintlify/admin/deployment/oidc/azure.mdx
index a199b9cbb38e2..0bae480f93e92 100644
--- a/docs-mintlify/admin/deployment/oidc/azure.mdx
+++ b/docs-mintlify/admin/deployment/oidc/azure.mdx
@@ -231,6 +231,21 @@ Contributor** on the storage account.
+
+<Note>
+OIDC only covers Cube's **read** side of the export bucket. The data
+warehouse itself (Snowflake on Azure, Synapse, …) runs the unload that
+writes objects to Blob Storage, and the warehouse cannot federate with
+Cube's OIDC issuer. You still need to provide **separate credentials for
+the unload** so the warehouse can write to the container — typically a
+storage account key, SAS token, or a warehouse-side storage integration —
+via the standard export bucket env vars (e.g.
+`CUBEJS_DB_EXPORT_BUCKET_AZURE_KEY`, or the driver-specific
+storage-integration variables). OIDC then handles Cube's download of the
+unloaded objects from the bucket.
+</Note>
+
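+As a concrete sketch, assuming a storage account key is used for the
+warehouse's write side (the container URL and key are placeholders; check
+your driver's export bucket page for the exact variable set):
+
+```sh
+# Write side: credentials the warehouse uses to unload into Blob Storage
+CUBEJS_DB_EXPORT_BUCKET=wasbs://exports@mystorageaccount.blob.core.windows.net
+CUBEJS_DB_EXPORT_BUCKET_AZURE_KEY=<storage-account-key>
+
+# Read side: no key or SAS token; Cube downloads the unloaded objects
+# with its OIDC workload identity.
+```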
+
## Scaling past 20 federated credentials
A single app registration accepts at most **20 federated credentials**.
diff --git a/docs-mintlify/admin/deployment/oidc/gcp.mdx b/docs-mintlify/admin/deployment/oidc/gcp.mdx
index 788543b6509d0..0e3e1cd35c4dc 100644
--- a/docs-mintlify/admin/deployment/oidc/gcp.mdx
+++ b/docs-mintlify/admin/deployment/oidc/gcp.mdx
@@ -261,6 +261,21 @@ deployment's service account read / write access to the bucket.
+
+<Note>
+OIDC only covers Cube's **read** side of the export bucket. The data
+warehouse itself (BigQuery, Snowflake on GCP, …) runs the `UNLOAD` /
+`EXPORT DATA` that writes objects to the bucket, and the warehouse cannot
+federate with Cube's OIDC issuer. You still need to provide **separate
+credentials for the unload** so the warehouse can write to GCS — typically
+an HMAC key pair or a warehouse-side service-account integration — via the
+standard export bucket env vars (e.g.
+`CUBEJS_DB_EXPORT_GCS_CREDENTIALS`, or the driver-specific
+storage-integration variables). OIDC then handles Cube's download of the
+unloaded objects from the bucket.
+</Note>
+
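+As a concrete sketch, assuming a service account key is supplied for the
+warehouse's write side (the bucket name and credentials value are
+placeholders; check your driver's export bucket page for the exact variable
+set):
+
+```sh
+# Write side: credentials used for the UNLOAD / EXPORT DATA into GCS
+CUBEJS_DB_EXPORT_BUCKET=my-cube-export-bucket
+CUBEJS_DB_EXPORT_GCS_CREDENTIALS=<base64-encoded-service-account-key>
+
+# Read side: nothing extra to set; Cube downloads the unloaded objects
+# from the bucket with its OIDC workload identity.
+```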
+
## Direct federation
If you'd rather skip the service account impersonation hop, grant
diff --git a/docs-mintlify/admin/deployment/oidc/index.mdx b/docs-mintlify/admin/deployment/oidc/index.mdx
index effa1add7b694..128750adf38f7 100644
--- a/docs-mintlify/admin/deployment/oidc/index.mdx
+++ b/docs-mintlify/admin/deployment/oidc/index.mdx
@@ -24,7 +24,9 @@ You can use OIDC workload identity to authenticate to:
- **Data sources** — AWS Athena, Redshift, BigQuery, Snowflake, and any other
driver that supports federated credentials.
- **Export buckets** — S3 and GCS buckets used for `EXPORT_BUCKET` pre-aggregation
- unloads.
+ unloads. OIDC covers Cube's download of the unloaded objects; the warehouse's
+  `UNLOAD` write still needs its own credentials, configured through the
+  export bucket env vars — see the per-cloud guides for details.
- **Cube Store CSPS** — a per-deployment S3 / GCS bucket that holds your
Cube Store pre-aggregations (Customer-Supplied Pre-aggregation Storage).
- **Bring-your-own LLM providers** — AWS Bedrock, Google Vertex AI, and Azure
diff --git a/packages/cubejs-backend-native/src/orchestrator.rs b/packages/cubejs-backend-native/src/orchestrator.rs
index 521577f697647..9b41660b2b606 100644
--- a/packages/cubejs-backend-native/src/orchestrator.rs
+++ b/packages/cubejs-backend-native/src/orchestrator.rs
@@ -11,8 +11,8 @@ use neon::context::{Context, FunctionContext, ModuleContext};
use neon::handle::Handle;
use neon::object::Object;
use neon::prelude::{
- JsArray, JsArrayBuffer, JsBox, JsBuffer, JsFunction, JsObject, JsPromise, JsResult, JsValue,
- NeonResult,
+ JsArray, JsArrayBuffer, JsBox, JsBuffer, JsFunction, JsObject, JsPromise, JsResult, JsString,
+ JsValue, NeonResult,
};
use neon::types::buffer::TypedArray;
use serde::Deserialize;
@@ -330,21 +330,24 @@ pub fn get_cubestore_result(mut cx: FunctionContext) -> JsResult {
let result = cx.argument::>>(0)?;
let js_array = cx.execute_scoped(|mut cx| {
+        let js_keys: Vec<Handle<JsString>> = result.members.iter().map(|k| cx.string(k)).collect();
+
let js_array = JsArray::new(&mut cx, result.rows.len());
for (i, row) in result.rows.iter().enumerate() {
let js_row = cx.execute_scoped(|mut cx| {
let js_row = JsObject::new(&mut cx);
- for (key, value) in result.members.iter().zip(row.iter()) {
- let js_key = cx.string(key);
+
+ for (js_key, value) in js_keys.iter().zip(row.iter()) {
let js_value: Handle<'_, JsValue> = match value {
DBResponsePrimitive::Null => cx.null().upcast(),
// For compatibility, we convert all primitives to strings
other => cx.string(other.to_string()).upcast(),
};
- js_row.set(&mut cx, js_key, js_value)?;
+ js_row.set(&mut cx, *js_key, js_value)?;
}
+
Ok(js_row)
})?;
diff --git a/rust/cube/cubesqlplanner/cubesqlplanner/src/logical_plan/multistage/common.rs b/rust/cube/cubesqlplanner/cubesqlplanner/src/logical_plan/multistage/common.rs
deleted file mode 100644
index 10c4e8f0880dc..0000000000000
--- a/rust/cube/cubesqlplanner/cubesqlplanner/src/logical_plan/multistage/common.rs
+++ /dev/null
@@ -1,55 +0,0 @@
-use crate::logical_plan::pretty_print::*;
-use crate::planner::planners::multi_stage::MultiStageAppliedState;
-
-impl PrettyPrint for MultiStageAppliedState {
- fn pretty_print(&self, result: &mut PrettyPrintResult, state: &PrettyPrintState) {
- let details_state = state.new_level();
- result.println(
- &format!(
- "-time_dimensions: {}",
- print_symbols(&self.time_dimensions())
- ),
- state,
- );
-
- result.println(
- &format!("-dimensions: {}", print_symbols(&self.dimensions())),
- state,
- );
-
- result.println("dimensions_filters:", &state);
- for filter in self.dimensions_filters().iter() {
- pretty_print_filter_item(result, &details_state, filter);
- }
- result.println("time_dimensions_filters:", &state);
- for filter in self.time_dimensions_filters().iter() {
- pretty_print_filter_item(result, &details_state, filter);
- }
- result.println("measures_filter:", &state);
- for filter in self.measures_filters().iter() {
- pretty_print_filter_item(result, &details_state, filter);
- }
- result.println("segments:", &state);
- for filter in self.segments().iter() {
- pretty_print_filter_item(result, &details_state, filter);
- }
-
- result.println("time_shifts:", &state);
- for (_, time_shift) in self.time_shifts().dimensions_shifts.iter() {
- result.println(
- &format!(
- "- {}: {}",
- time_shift.dimension.full_name(),
- if let Some(interval) = &time_shift.interval {
- interval.to_sql()
- } else if let Some(name) = &time_shift.name {
- format!("{} (named)", name.to_string())
- } else {
- "None".to_string()
- }
- ),
- &details_state,
- );
- }
- }
-}
diff --git a/rust/cube/cubesqlplanner/cubesqlplanner/src/logical_plan/multistage/mod.rs b/rust/cube/cubesqlplanner/cubesqlplanner/src/logical_plan/multistage/mod.rs
index 3a82b775bc971..3d269880839f5 100644
--- a/rust/cube/cubesqlplanner/cubesqlplanner/src/logical_plan/multistage/mod.rs
+++ b/rust/cube/cubesqlplanner/cubesqlplanner/src/logical_plan/multistage/mod.rs
@@ -1,5 +1,4 @@
mod calculation;
-mod common;
mod dimension;
mod get_date_range;
mod leaf_measure;
diff --git a/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/base_query.rs b/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/base_query.rs
index f64d382d9f3e7..9c1b11ad3cc34 100644
--- a/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/base_query.rs
+++ b/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/base_query.rs
@@ -1,6 +1,6 @@
use super::query_tools::QueryTools;
use super::top_level_planner::TopLevelPlanner;
-use super::QueryProperties;
+use super::{QueryProperties, QueryPropertiesCompiler};
use crate::cube_bridge::base_query_options::BaseQueryOptions;
use crate::cube_bridge::pre_aggregation_obj::NativePreAggregationObj;
use crate::logical_plan::PreAggregationUsage;
@@ -61,7 +61,7 @@ impl BaseQuery {
options.static_data().member_to_alias.clone(),
)?;
- let request = QueryProperties::try_new(query_tools.clone(), options)?;
+ let request = QueryPropertiesCompiler::new(query_tools.clone()).build(options)?;
Ok(Self {
context,
diff --git a/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/mod.rs b/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/mod.rs
index 34904dc3403f0..c0db55ab240ff 100644
--- a/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/mod.rs
+++ b/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/mod.rs
@@ -15,6 +15,7 @@ pub mod visitor;
pub mod params_allocator;
pub mod planners;
pub mod query_properties;
+pub mod query_properties_compiler;
pub mod query_tools;
pub mod sql_templates;
pub mod top_level_planner;
@@ -29,6 +30,7 @@ pub use compiler::Compiler;
pub use join_hints::JoinHints;
pub use params_allocator::ParamsAllocator;
pub use query_properties::{FullKeyAggregateMeasures, OrderByItem, QueryProperties};
+pub use query_properties_compiler::QueryPropertiesCompiler;
pub use sql_call::*;
pub use symbols::*;
pub use time_dimension::*;
diff --git a/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/multi_fact_join_groups.rs b/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/multi_fact_join_groups.rs
index 8a669c4ce1e53..b00908fa23313 100644
--- a/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/multi_fact_join_groups.rs
+++ b/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/multi_fact_join_groups.rs
@@ -59,13 +59,6 @@ pub struct MeasuresJoinHints {
}
impl MeasuresJoinHints {
- pub fn empty() -> Self {
- Self {
- base_hints: JoinHints::new(),
- measure_hints: vec![],
- }
- }
-
pub fn builder(query_join_hints: &JoinHints) -> MeasuresJoinHintsBuilder {
MeasuresJoinHintsBuilder {
initial_hints: query_join_hints.clone(),
@@ -137,16 +130,6 @@ pub struct MultiFactJoinGroups {
}
impl MultiFactJoinGroups {
-    pub fn empty(query_tools: Rc<QueryTools>) -> Self {
- Self {
- query_tools,
- measures_join_hints: MeasuresJoinHints::empty(),
- groups: vec![],
- dimension_paths: HashMap::new(),
- measure_paths: HashMap::new(),
- }
- }
-
pub fn try_new(
        query_tools: Rc<QueryTools>,
measures_join_hints: MeasuresJoinHints,
diff --git a/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/planners/dimension_subquery_planner.rs b/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/planners/dimension_subquery_planner.rs
index b123cd1146bea..64dda0181cd8a 100644
--- a/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/planners/dimension_subquery_planner.rs
+++ b/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/planners/dimension_subquery_planner.rs
@@ -3,7 +3,6 @@ use crate::logical_plan::{pretty_print_rc, DimensionSubQuery};
use crate::physical_plan::QualifiedColumnName;
use crate::planner::collectors::collect_sub_query_dimensions;
use crate::planner::filter::FilterItem;
-use crate::planner::join_hints::JoinHints;
use crate::planner::query_tools::QueryTools;
use crate::planner::QueryProperties;
use crate::planner::{MemberExpressionExpression, MemberExpressionSymbol, MemberSymbol};
@@ -111,26 +110,17 @@ impl DimensionSubqueryPlanner {
(vec![], vec![])
};
- let sub_query_properties = QueryProperties::try_new_from_precompiled(
- self.query_tools.clone(),
- vec![measure.clone()], //measures,
- primary_keys_dimensions.clone(),
- vec![],
- time_dimensions_filters,
- dimensions_filters,
- vec![],
- vec![],
- vec![],
- None,
- None,
- true,
- false,
- false,
- false,
- Rc::new(JoinHints::new()),
- true,
- self.query_properties.disable_external_pre_aggregations(),
- )?;
+ let sub_query_properties = QueryProperties::builder()
+ .query_tools(self.query_tools.clone())
+ .measures(vec![measure.clone()])
+ .dimensions(primary_keys_dimensions.clone())
+ .time_dimensions_filters(time_dimensions_filters)
+ .dimensions_filters(dimensions_filters)
+ .ignore_cumulative(true)
+ .disable_external_pre_aggregations(
+ self.query_properties.disable_external_pre_aggregations(),
+ )
+ .build()?;
let query_planner = QueryPlanner::new(sub_query_properties, self.query_tools.clone());
let sub_query = query_planner.plan()?;
let result = Rc::new(DimensionSubQuery {
diff --git a/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/planners/full_key_query_aggregate_planner.rs b/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/planners/full_key_query_aggregate_planner.rs
index 25e0a862acf2b..6fbf06027f756 100644
--- a/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/planners/full_key_query_aggregate_planner.rs
+++ b/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/planners/full_key_query_aggregate_planner.rs
@@ -63,7 +63,7 @@ impl FullKeyAggregateQueryPlanner {
offset: self.query_properties.offset(),
limit: self.query_properties.row_limit(),
ungrouped: self.query_properties.ungrouped(),
- order_by: self.query_properties.order_by().clone(),
+ order_by: self.query_properties.order_by().to_vec(),
}))
.source(source)
.build();
diff --git a/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/applied_state.rs b/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/applied_state.rs
deleted file mode 100644
index 86d56d24330dc..0000000000000
--- a/rust/cube/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/applied_state.rs
+++ /dev/null
@@ -1,547 +0,0 @@
-use crate::planner::collectors::has_multi_stage_members;
-use crate::planner::filter::FilterOperator;
-use crate::planner::filter::{FilterGroup, FilterItem};
-use crate::planner::planners::multi_stage::time_shift_state::TimeShiftState;
-use crate::planner::{DimensionTimeShift, MeasureTimeShifts, MemberSymbol};
-use cubenativeutils::CubeError;
-use itertools::Itertools;
-use std::cmp::PartialEq;
-use std::collections::HashSet;
-use std::fmt::Debug;
-use std::rc::Rc;
-
-#[derive(Clone)]
-pub struct MultiStageAppliedState {
-    time_dimensions: Vec<Rc<MemberSymbol>>,
-    dimensions: Vec<Rc<MemberSymbol>>,
-    time_dimensions_filters: Vec<FilterItem>,
-    dimensions_filters: Vec<FilterItem>,
-    measures_filters: Vec<FilterItem>,
-    segments: Vec<FilterItem>,
- time_shifts: TimeShiftState,
-}
-
-impl MultiStageAppliedState {
- pub fn new(
-        time_dimensions: Vec<Rc<MemberSymbol>>,
-        dimensions: Vec<Rc<MemberSymbol>>,
-        time_dimensions_filters: Vec<FilterItem>,
-        dimensions_filters: Vec<FilterItem>,
-        measures_filters: Vec<FilterItem>,
-        segments: Vec<FilterItem>,
-    ) -> Rc<Self> {
- Rc::new(Self {
- time_dimensions,
- dimensions,
- time_dimensions_filters,
- dimensions_filters,
- measures_filters,
- segments,
- time_shifts: TimeShiftState::default(),
- })
- }
-
- pub fn clone_state(&self) -> Self {
- Self {
- time_dimensions: self.time_dimensions.clone(),
- dimensions: self.dimensions.clone(),
- time_dimensions_filters: self.time_dimensions_filters.clone(),
- dimensions_filters: self.dimensions_filters.clone(),
- measures_filters: self.measures_filters.clone(),
- segments: self.segments.clone(),
- time_shifts: self.time_shifts.clone(),
- }
- }
-
-    pub fn add_dimensions(&mut self, dimensions: Vec<Rc<MemberSymbol>>) {
- self.dimensions = self
- .dimensions
- .iter()
- .cloned()
- .chain(dimensions.into_iter())
- .unique_by(|d| d.clone().resolve_reference_chain().full_name())
- .collect_vec();
- }
-
- pub fn add_dimension_filter(&mut self, filter: FilterItem) {
- self.dimensions_filters.push(filter);
- }
-
- pub fn remove_multistage_dimensions(
- &mut self,
-        resolved_dimensions: &HashSet<String>,
- ) -> Result<(), CubeError> {
- let mut filtered = Vec::new();
- for d in &self.dimensions {
- if resolved_dimensions.contains(&d.clone().resolve_reference_chain().full_name())
- || !has_multi_stage_members(&d, true)?
- {
- filtered.push(d.clone());
- }
- }
- self.dimensions = filtered;
- let mut filtered = Vec::new();
- for d in &self.time_dimensions {
- if resolved_dimensions.contains(&d.clone().resolve_reference_chain().full_name())
- || !has_multi_stage_members(&d, true)?
- {
- filtered.push(d.clone());
- }
- }
- self.time_dimensions = filtered;
- Ok(())
- }
-
- pub fn add_time_shifts(&mut self, time_shifts: MeasureTimeShifts) -> Result<(), CubeError> {
- let resolved_shifts = match time_shifts {
- MeasureTimeShifts::Dimensions(dimensions) => dimensions,
- MeasureTimeShifts::Common(interval) => self
- .all_time_members()
- .into_iter()
- .map(|m| DimensionTimeShift {
- interval: Some(interval.clone()),
- dimension: m,
- name: None,
- })
- .collect_vec(),
- MeasureTimeShifts::Named(named_shift) => self
- .all_time_members()
- .into_iter()
- .map(|m| DimensionTimeShift {
- interval: None,
- dimension: m,
- name: Some(named_shift.clone()),
- })
- .collect_vec(),
- };
- for ts in resolved_shifts.into_iter() {
- if let Some(exists) = self
- .time_shifts
- .dimensions_shifts
- .get_mut(&ts.dimension.full_name())
- {
- if let Some(interval) = exists.interval.clone() {
- if let Some(new_interval) = ts.interval {
- exists.interval = Some(interval + new_interval);
- } else {
- return Err(CubeError::internal(format!(
- "Cannot use both named ({}) and interval ({}) shifts for the same dimension: {}.",
- ts.name.clone().unwrap_or("-".to_string()),
- interval.to_sql(),
- ts.dimension.full_name(),
- )));
- }
- } else if let Some(named_shift) = exists.name.clone() {
- return if let Some(new_interval) = ts.interval {
- Err(CubeError::internal(format!(
- "Cannot use both named ({}) and interval ({}) shifts for the same dimension: {}.",
- named_shift,
- new_interval.to_sql(),
- ts.dimension.full_name(),
- )))
- } else {
- Err(CubeError::internal(format!(
- "Cannot use more than one named shifts ({}, {}) for the same dimension: {}.",
- ts.name.clone().unwrap_or("-".to_string()),
- named_shift,
- ts.dimension.full_name(),
- )))
- };
- }
- } else {
- self.time_shifts
- .dimensions_shifts
- .insert(ts.dimension.full_name(), ts);
- }
- }
- Ok(())
- }
-
- pub fn time_shifts(&self) -> &TimeShiftState {
- &self.time_shifts
- }
-
-    fn all_time_members(&self) -> Vec<Rc<MemberSymbol>> {
- let mut filter_symbols = self.all_dimensions_symbols();
- for filter_item in self
- .time_dimensions_filters
- .iter()
- .chain(self.dimensions_filters.iter())
- .chain(self.segments.iter())
- {
- filter_item.find_all_member_evaluators(&mut filter_symbols);
- }
-
- let time_symbols = filter_symbols
- .into_iter()
- .filter_map(|m| {
- let symbol = if let Ok(time_dim) = m.as_time_dimension() {
- time_dim.base_symbol().clone().resolve_reference_chain()
- } else {
- m.resolve_reference_chain()
- };
- if let Ok(dim) = symbol.as_dimension() {
- if dim.is_time() {
- Some(symbol)
- } else {
- None
- }
- } else {
- None
- }
- })
- .unique_by(|s| s.full_name())
- .collect_vec();
- time_symbols
- }
-
-    pub fn time_dimensions_filters(&self) -> &Vec<FilterItem> {
- &self.time_dimensions_filters
- }
-
-    pub fn time_dimensions_symbols(&self) -> Vec<Rc<MemberSymbol>> {
- self.time_dimensions().clone()
- }
-
-    pub fn dimensions_symbols(&self) -> Vec<Rc<MemberSymbol>> {
- self.dimensions.clone()
- }
-
-    pub fn all_dimensions_symbols(&self) -> Vec<Rc<MemberSymbol>> {
- self.time_dimensions
- .iter()
- .cloned()
- .chain(self.dimensions.iter().cloned())
- .collect()
- }
-
-    pub fn dimensions_filters(&self) -> &Vec<FilterItem> {
- &self.dimensions_filters
- }
-
-    pub fn segments(&self) -> &Vec<FilterItem> {
- &self.segments
- }
-
-    pub fn measures_filters(&self) -> &Vec<FilterItem> {
- &self.measures_filters
- }
-
-    pub fn dimensions(&self) -> &Vec<Rc<MemberSymbol>> {
- &self.dimensions
- }
-
-    pub fn time_dimensions(&self) -> &Vec<Rc<MemberSymbol>> {
- &self.time_dimensions
- }
-
-    pub fn set_time_dimensions(&mut self, time_dimensions: Vec<Rc<MemberSymbol>>) {
- self.time_dimensions = time_dimensions;
- }
-
-    pub fn set_dimensions(&mut self, dimensions: Vec<Rc<MemberSymbol>>) {
- self.dimensions = dimensions;
- }
-
- pub fn remove_filter_for_member(&mut self, member_name: &String) {
- self.time_dimensions_filters =
- self.extract_filters_exclude_member(member_name, &self.time_dimensions_filters);
- self.dimensions_filters =
- self.extract_filters_exclude_member(member_name, &self.dimensions_filters);
- self.measures_filters =
- self.extract_filters_exclude_member(member_name, &self.measures_filters);
- }
-
- fn extract_filters_exclude_member(
- &self,
- member_name: &String,
-        filters: &Vec<FilterItem>,
-    ) -> Vec<FilterItem> {
- let mut result = Vec::new();
- for item in filters.iter() {
- match item {
- FilterItem::Group(group) => {
- let new_group = FilterItem::Group(Rc::new(FilterGroup::new(
- group.operator.clone(),
- self.extract_filters_exclude_member(member_name, &group.items),
- )));
- result.push(new_group);
- }
- FilterItem::Item(itm) => {
- if &itm.member_name() != member_name {
- result.push(FilterItem::Item(itm.clone()));
- }
- }
- FilterItem::Segment(_) => {}
- }
- }
- result
- }
-
- pub fn has_filters_for_member(&self, member_name: &String) -> bool {
- self.has_filters_for_member_impl(member_name, &self.time_dimensions_filters)
- || self.has_filters_for_member_impl(member_name, &self.dimensions_filters)
- || self.has_filters_for_member_impl(member_name, &self.measures_filters)
- }
-
-    fn has_filters_for_member_impl(&self, member_name: &String, filters: &Vec<FilterItem>) -> bool {
- for item in filters.iter() {
- match item {
- FilterItem::Group(group) => {
- if self.has_filters_for_member_impl(member_name, &group.items) {
- return true;
- }
- }
- FilterItem::Item(itm) => {
- if &itm.member_name() == member_name {
- return true;
- }
- }
- FilterItem::Segment(_) => {}
- }
- }
- false
- }
-
- /// Replace InDateRange filter with bounded version for rolling window without granularity.
- /// Unlike `replace_regular_date_range_filter` which uses time_series CTE references,
- /// this keeps parameter-based filters suitable for queries without a time_series CTE.
- pub fn replace_date_range_for_rolling_window_without_granularity(
- &mut self,
- member_name: &String,
-        trailing: &Option<String>,
-        leading: &Option<String>,
- ) -> Result<(), CubeError> {
- let trailing_unbounded = trailing.as_deref() == Some("unbounded");
- let leading_unbounded = leading.as_deref() == Some("unbounded");
-
- if !trailing_unbounded && !leading_unbounded {
- return Ok(());
- }
-
- if trailing_unbounded && leading_unbounded {
- // Both unbounded — remove the date range filter entirely
- self.time_dimensions_filters.retain(|item| match item {
- FilterItem::Item(itm) => {
- !(&itm.member_name() == member_name
- && matches!(itm.filter_operator(), FilterOperator::InDateRange))
- }
- _ => true,
- });
- } else if trailing_unbounded {
- // Remove lower bound: InDateRange(from, to) → BeforeOrOnDate(to)
- let mut new_filters = Vec::new();
- for item in self.time_dimensions_filters.iter() {
- match item {
- FilterItem::Item(itm)
- if &itm.member_name() == member_name
- && matches!(itm.filter_operator(), FilterOperator::InDateRange) =>
- {
- let values = itm.values();
- let to_value = if values.len() >= 2 {
- vec![values[1].clone()]
- } else {
- values.clone()
- };
- new_filters.push(FilterItem::Item(itm.change_operator(
- FilterOperator::BeforeOrOnDate,
- to_value,
- itm.use_raw_values(),
- )?));
- }
- other => new_filters.push(other.clone()),
- }
- }
- self.time_dimensions_filters = new_filters;
- } else {
- // leading unbounded: remove upper bound: InDateRange(from, to) → AfterOrOnDate(from)
- let mut new_filters = Vec::new();
- for item in self.time_dimensions_filters.iter() {
- match item {
- FilterItem::Item(itm)
- if &itm.member_name() == member_name
- && matches!(itm.filter_operator(), FilterOperator::InDateRange) =>
- {
- let values = itm.values();
- let from_value = if !values.is_empty() {
- vec![values[0].clone()]
- } else {
- values.clone()
- };
- new_filters.push(FilterItem::Item(itm.change_operator(
- FilterOperator::AfterOrOnDate,
- from_value,
- itm.use_raw_values(),
- )?));
- }
- other => new_filters.push(other.clone()),
- }
- }
- self.time_dimensions_filters = new_filters;
- }
- Ok(())
- }
-
- pub fn replace_regular_date_range_filter(
- &mut self,
- member_name: &String,
-        left_interval: Option<String>,
-        right_interval: Option<String>,
- ) -> Result<(), CubeError> {
- let operator = FilterOperator::RegularRollingWindowDateRange;
- let values = vec![left_interval.clone(), right_interval.clone()];
- self.time_dimensions_filters = self.change_date_range_filter_impl(
- member_name,
- &self.time_dimensions_filters,
- &operator,
- None,
- &values,
- &None,
- )?;
- Ok(())
- }
-
- pub fn replace_to_date_date_range_filter(
- &mut self,
- member_name: &String,
- granularity: &String,
- ) -> Result<(), CubeError> {
- let operator = FilterOperator::ToDateRollingWindowDateRange;
- let values = vec![Some(granularity.clone())];
- self.time_dimensions_filters = self.change_date_range_filter_impl(
- member_name,
- &self.time_dimensions_filters,
- &operator,
- None,
- &values,
- &None,
- )?;
- Ok(())
- }
-
- pub fn replace_range_in_date_filter(
- &mut self,
- member_name: &String,
- new_from: String,
- new_to: String,
- ) -> Result<(), CubeError> {
- let operator = FilterOperator::InDateRange;
- let replacement_values = vec![Some(new_from), Some(new_to)];
- self.time_dimensions_filters = self.change_date_range_filter_impl(
- member_name,
- &self.time_dimensions_filters,
- &operator,
- None,
- &vec![],
- &Some(replacement_values),
- )?;
- Ok(())
- }
-
- pub fn replace_range_to_subquery_in_date_filter(
- &mut self,
- member_name: &String,
- new_from: String,
- new_to: String,
- ) -> Result<(), CubeError> {
- let operator = FilterOperator::InDateRange;
- let replacement_values = vec![Some(new_from), Some(new_to)];
- self.time_dimensions_filters = self.change_date_range_filter_impl(
- member_name,
- &self.time_dimensions_filters,
- &operator,
- Some(true),
- &vec![],
- &Some(replacement_values),
- )?;
- Ok(())
- }
-
- fn change_date_range_filter_impl(
- &self,
- member_name: &String,
- filters: &Vec,
- operator: &FilterOperator,
-        use_raw_values: Option<bool>,
- additional_values: &Vec