More progress on tests

This commit is contained in:
2026-03-12 17:46:38 -04:00
parent 5b183a1aba
commit 732034bbc7
10 changed files with 1661 additions and 544 deletions

File diff suppressed because it is too large Load Diff

View File

@ -8,28 +8,34 @@
{
"type": "relation",
"id": "00000000-0000-0000-0000-000000000001",
"constraint": "fk_contact_person",
"source_type": "contact",
"constraint": "fk_relationship_source_entity",
"source_type": "relationship",
"source_columns": [
"source_id"
"source_id",
"source_type"
],
"destination_type": "person",
"destination_type": "entity",
"destination_columns": [
"id"
]
"id",
"type"
],
"prefix": "source"
},
{
"type": "relation",
"id": "00000000-0000-0000-0000-000000000002",
"constraint": "fk_contact_phone",
"source_type": "contact",
"constraint": "fk_relationship_target_entity",
"source_type": "relationship",
"source_columns": [
"target_id"
"target_id",
"target_type"
],
"destination_type": "phone_number",
"destination_type": "entity",
"destination_columns": [
"id"
]
"id",
"type"
],
"prefix": "target"
}
],
"types": [
@ -461,7 +467,7 @@
},
"tests": [
{
"description": "Should execute a blanket SELECT query when no filters are present",
"description": "Simple entity select",
"action": "query",
"schema_id": "entity",
"expect": {
@ -481,7 +487,7 @@
}
},
{
"description": "Should execute a blanket SELECT query isolating root stems directly",
"description": "Simple entity select on root stem",
"action": "query",
"schema_id": "entity",
"stem": "",
@ -502,65 +508,60 @@
}
},
{
"description": "Should bind parameters with proper casting and ILIKE for generated generic SELECT string when using some filters",
"description": "Simple entity select with multiple filters",
"action": "query",
"schema_id": "entity",
"filters": {
"name": "Jane%",
"archived": false
"id": {
"$eq": "123e4567-e89b-12d3-a456-426614174000",
"$ne": "123e4567-e89b-12d3-a456-426614174001",
"$in": [
"123e4567-e89b-12d3-a456-426614174000"
],
"$nin": [
"123e4567-e89b-12d3-a456-426614174001"
]
},
"name": {
"$eq": "Jane%",
"$ne": "John%",
"$gt": "A",
"$gte": "B",
"$lt": "Z",
"$lte": "Y",
"$in": [
"Jane",
"John"
],
"$nin": [
"Bob",
"Alice"
]
},
"created_at": {
"$eq": "2023-01-01T00:00:00Z",
"$ne": "2023-01-02T00:00:00Z",
"$gt": "2022-01-01T00:00:00Z",
"$gte": "2022-01-02T00:00:00Z",
"$lt": "2024-01-01T00:00:00Z",
"$lte": "2024-01-02T00:00:00Z"
},
"archived": {
"$eq": false,
"$ne": true
}
},
"expect": {
"success": true,
"sql": [
[
"(SELECT jsonb_build_object(",
" 'archived', t1_obj_t1.archived,",
" 'created_at', t1_obj_t1.created_at,",
" 'id', t1_obj_t1.id,",
" 'name', t1_obj_t1.name,",
" 'type', t1_obj_t1.type)",
" FROM agreego.entity t1_obj_t1",
" WHERE",
" NOT t1_obj_t1.archived",
" AND t1_obj_t1.archived = ($1#>>'{}')::boolean",
" AND t1_obj_t1.name ILIKE $2#>>'{}')"
"DUMMY TO FAIL AND DUMP SQL"
]
]
}
},
{
"description": "Should bind all parameters with proper casting for complex generic SELECT string",
"action": "query",
"schema_id": "entity",
"filters": {
"id": "123e4567-e89b-12d3-a456-426614174000",
"name": "Jane%",
"created_at": "2023-01-01T00:00:00Z",
"archived": false
},
"expect": {
"success": true,
"sql": [
[
"(SELECT jsonb_build_object(",
" 'archived', t1_obj_t1.archived,",
" 'created_at', t1_obj_t1.created_at,",
" 'id', t1_obj_t1.id,",
" 'name', t1_obj_t1.name,",
" 'type', t1_obj_t1.type)",
" FROM agreego.entity t1_obj_t1",
" WHERE",
" NOT t1_obj_t1.archived",
" AND t1_obj_t1.archived = ($1#>>'{}')::boolean",
" AND t1_obj_t1.created_at = ($2#>>'{}')::timestamptz",
" AND t1_obj_t1.id = ($3#>>'{}')::uuid",
" AND t1_obj_t1.name ILIKE $4#>>'{}')"
]
]
}
},
{
"description": "Should execute table multi-joins on inheritance for basic schema",
"description": "Person select on base schema",
"action": "query",
"schema_id": "base.person",
"expect": {
@ -584,7 +585,7 @@
}
},
{
"description": "Should render a massive query handling full nested tree generation and JSON aggregation for complex relationships",
"description": "Person select on full schema",
"action": "query",
"schema_id": "full.person",
"expect": {
@ -715,147 +716,79 @@
}
},
{
"description": "Should attach structural filters against the root entity object regardless of how deep the select statement builds child join maps",
"description": "Person select on full schema with filters",
"action": "query",
"schema_id": "full.person",
"filters": {
"first_name": "Jane%",
"last_name": "%Doe%",
"archived": true
"age": {
"$eq": 30,
"$gt": 20,
"$gte": 20,
"$in": [
30,
40
],
"$lt": 50,
"$lte": 50,
"$ne": 25,
"$nin": [
1,
2
]
},
"archived": {
"$eq": true,
"$ne": false
},
"created_at": {
"$eq": "2020-01-01T00:00:00Z",
"$gt": "2019-01-01T00:00:00Z",
"$gte": "2019-01-01T00:00:00Z",
"$lt": "2021-01-01T00:00:00Z",
"$lte": "2021-01-01T00:00:00Z",
"$ne": "2022-01-01T00:00:00Z"
},
"first_name": {
"$eq": "Jane%",
"$gt": "A",
"$gte": "A",
"$in": [
"Jane",
"John"
],
"$lt": "Z",
"$lte": "Z",
"$ne": "Doe",
"$nin": [
"Bob"
]
},
"last_name": {
"$eq": "%Doe%",
"$ne": "%Smith%"
},
"id": {
"$eq": "00000000-0000-0000-0000-000000000001",
"$in": [
"00000000-0000-0000-0000-000000000001"
],
"$ne": "00000000-0000-0000-0000-000000000002",
"$nin": [
"00000000-0000-0000-0000-000000000002"
]
}
},
"expect": {
"success": true,
"sql": [
[
"(SELECT jsonb_build_object(",
" 'addresses',",
" (SELECT COALESCE(jsonb_agg(jsonb_build_object(",
" 'archived', t1_obj_t2_addresses_t3.archived,",
" 'created_at', t1_obj_t2_addresses_t3.created_at,",
" 'id', t1_obj_t2_addresses_t3.id,",
" 'is_primary', t1_obj_t2_addresses_t1.is_primary,",
" 'name', t1_obj_t2_addresses_t3.name,",
" 'target',",
" (SELECT jsonb_build_object(",
" 'archived', t1_obj_t2_addresses_t3_target_t2.archived,",
" 'city', t1_obj_t2_addresses_t3_target_t1.city,",
" 'created_at', t1_obj_t2_addresses_t3_target_t2.created_at,",
" 'id', t1_obj_t2_addresses_t3_target_t2.id,",
" 'name', t1_obj_t2_addresses_t3_target_t2.name,",
" 'type', t1_obj_t2_addresses_t3_target_t2.type",
" )",
" FROM agreego.address t1_obj_t2_addresses_t3_target_t1",
" JOIN agreego.entity t1_obj_t2_addresses_t3_target_t2 ON t1_obj_t2_addresses_t3_target_t2.id = t1_obj_t2_addresses_t3_target_t1.id",
" WHERE",
" NOT t1_obj_t2_addresses_t3_target_t1.archived",
" AND t1_obj_t2_addresses_t3_target_t1.parent_id = t1_obj_t2_addresses_t3.id",
" ),",
" 'type', t1_obj_t2_addresses_t3.type",
" )), '[]'::jsonb)",
" FROM agreego.contact t1_obj_t2_addresses_t1",
" JOIN agreego.relationship t1_obj_t2_addresses_t2 ON t1_obj_t2_addresses_t2.id = t1_obj_t2_addresses_t1.id",
" JOIN agreego.entity t1_obj_t2_addresses_t3 ON t1_obj_t2_addresses_t3.id = t1_obj_t2_addresses_t2.id",
" WHERE",
" NOT t1_obj_t2_addresses_t1.archived",
" AND t1_obj_t2_addresses_t1.parent_id = t1_obj_t2.id),",
" 'age', t1_obj_t1.age,",
" 'archived', t1_obj_t2.archived,",
" 'contacts',",
" (SELECT COALESCE(jsonb_agg(jsonb_build_object(",
" 'archived', t1_obj_t2_contacts_t3.archived,",
" 'created_at', t1_obj_t2_contacts_t3.created_at,",
" 'id', t1_obj_t2_contacts_t3.id,",
" 'is_primary', t1_obj_t2_contacts_t1.is_primary,",
" 'name', t1_obj_t2_contacts_t3.name,",
" 'target', t1_obj_t2_contacts_t3.target,",
" 'type', t1_obj_t2_contacts_t3.type",
" )), '[]'::jsonb)",
" FROM agreego.contact t1_obj_t2_contacts_t1",
" JOIN agreego.relationship t1_obj_t2_contacts_t2 ON t1_obj_t2_contacts_t2.id = t1_obj_t2_contacts_t1.id",
" JOIN agreego.entity t1_obj_t2_contacts_t3 ON t1_obj_t2_contacts_t3.id = t1_obj_t2_contacts_t2.id",
" WHERE",
" NOT t1_obj_t2_contacts_t1.archived",
" AND t1_obj_t2_contacts_t1.parent_id = t1_obj_t2.id),",
" 'created_at', t1_obj_t2.created_at,",
" 'email_addresses',",
" (SELECT COALESCE(jsonb_agg(jsonb_build_object(",
" 'archived', t1_obj_t2_email_addresses_t3.archived,",
" 'created_at', t1_obj_t2_email_addresses_t3.created_at,",
" 'id', t1_obj_t2_email_addresses_t3.id,",
" 'is_primary', t1_obj_t2_email_addresses_t1.is_primary,",
" 'name', t1_obj_t2_email_addresses_t3.name,",
" 'target',",
" (SELECT jsonb_build_object(",
" 'address', t1_obj_t2_email_addresses_t3_target_t1.address,",
" 'archived', t1_obj_t2_email_addresses_t3_target_t2.archived,",
" 'created_at', t1_obj_t2_email_addresses_t3_target_t2.created_at,",
" 'id', t1_obj_t2_email_addresses_t3_target_t2.id,",
" 'name', t1_obj_t2_email_addresses_t3_target_t2.name,",
" 'type', t1_obj_t2_email_addresses_t3_target_t2.type",
" )",
" FROM agreego.email_address t1_obj_t2_email_addresses_t3_target_t1",
" JOIN agreego.entity t1_obj_t2_email_addresses_t3_target_t2 ON t1_obj_t2_email_addresses_t3_target_t2.id = t1_obj_t2_email_addresses_t3_target_t1.id",
" WHERE",
" NOT t1_obj_t2_email_addresses_t3_target_t1.archived",
" AND t1_obj_t2_email_addresses_t3_target_t1.parent_id = t1_obj_t2_email_addresses_t3.id",
" ),",
" 'type', t1_obj_t2_email_addresses_t3.type",
" )), '[]'::jsonb)",
" FROM agreego.contact t1_obj_t2_email_addresses_t1",
" JOIN agreego.relationship t1_obj_t2_email_addresses_t2 ON t1_obj_t2_email_addresses_t2.id = t1_obj_t2_email_addresses_t1.id",
" JOIN agreego.entity t1_obj_t2_email_addresses_t3 ON t1_obj_t2_email_addresses_t3.id = t1_obj_t2_email_addresses_t2.id",
" WHERE",
" NOT t1_obj_t2_email_addresses_t1.archived",
" AND t1_obj_t2_email_addresses_t1.parent_id = t1_obj_t2.id),",
" 'first_name', t1_obj_t1.first_name,",
" 'id', t1_obj_t2.id,",
" 'last_name', t1_obj_t1.last_name,",
" 'name', t1_obj_t2.name,",
" 'phone_numbers',",
" (SELECT COALESCE(jsonb_agg(jsonb_build_object(",
" 'archived', t1_obj_t2_phone_numbers_t3.archived,",
" 'created_at', t1_obj_t2_phone_numbers_t3.created_at,",
" 'id', t1_obj_t2_phone_numbers_t3.id,",
" 'is_primary', t1_obj_t2_phone_numbers_t1.is_primary,",
" 'name', t1_obj_t2_phone_numbers_t3.name,",
" 'target',",
" (SELECT jsonb_build_object(",
" 'archived', t1_obj_t2_phone_numbers_t3_target_t2.archived,",
" 'created_at', t1_obj_t2_phone_numbers_t3_target_t2.created_at,",
" 'id', t1_obj_t2_phone_numbers_t3_target_t2.id,",
" 'name', t1_obj_t2_phone_numbers_t3_target_t2.name,",
" 'number', t1_obj_t2_phone_numbers_t3_target_t1.number,",
" 'type', t1_obj_t2_phone_numbers_t3_target_t2.type",
" )",
" FROM agreego.phone_number t1_obj_t2_phone_numbers_t3_target_t1",
" JOIN agreego.entity t1_obj_t2_phone_numbers_t3_target_t2 ON t1_obj_t2_phone_numbers_t3_target_t2.id = t1_obj_t2_phone_numbers_t3_target_t1.id",
" WHERE",
" NOT t1_obj_t2_phone_numbers_t3_target_t1.archived",
" AND t1_obj_t2_phone_numbers_t3_target_t1.parent_id = t1_obj_t2_phone_numbers_t3.id",
" ),",
" 'type', t1_obj_t2_phone_numbers_t3.type",
" )), '[]'::jsonb)",
" FROM agreego.contact t1_obj_t2_phone_numbers_t1",
" JOIN agreego.relationship t1_obj_t2_phone_numbers_t2 ON t1_obj_t2_phone_numbers_t2.id = t1_obj_t2_phone_numbers_t1.id",
" JOIN agreego.entity t1_obj_t2_phone_numbers_t3 ON t1_obj_t2_phone_numbers_t3.id = t1_obj_t2_phone_numbers_t2.id",
" WHERE",
" NOT t1_obj_t2_phone_numbers_t1.archived",
" AND t1_obj_t2_phone_numbers_t1.parent_id = t1_obj_t2.id),",
" 'type', t1_obj_t2.type",
")",
"FROM agreego.person t1_obj_t1",
"JOIN agreego.entity t1_obj_t2 ON t1_obj_t2.id = t1_obj_t1.id",
"WHERE",
" NOT t1_obj_t1.archived",
" AND t1_obj_t2.archived = ($1#>>'{}')::boolean",
" AND t1_obj_t1.first_name ILIKE $2#>>'{}'",
" AND t1_obj_t1.last_name ILIKE $3#>>'{}')"
"DUMMY TO FAIL AND DUMP SQL"
]
]
}
},
{
"description": "Should extract the targeted subset payload specifically for a high-level nested list",
"description": "Full person stem query on phone number contact",
"action": "query",
"schema_id": "full.person",
"stem": "phone_numbers/contact",
@ -879,7 +812,7 @@
}
},
{
"description": "Should successfully execute nested path extraction for targeted root subgraphs on beats",
"description": "Full person stem query on phone number contact on phone number",
"action": "query",
"schema_id": "full.person",
"stem": "phone_numbers/contact/phone_number",
@ -902,7 +835,7 @@
}
},
{
"description": "Should successfully resolve unique execution plans across nested properties inside relationships containing oneOf configurations",
"description": "Full person stem query on contact email address",
"action": "query",
"schema_id": "full.person",
"stem": "contacts/contact/email_address",

View File

@ -1,6 +1,9 @@
#[cfg(test)]
use crate::database::executors::DatabaseExecutor;
#[cfg(test)]
use regex::Regex;
#[cfg(test)]
use serde_json::Value;
#[cfg(test)]
use std::cell::RefCell;
@ -9,6 +12,7 @@ pub struct MockState {
pub captured_queries: Vec<String>,
pub query_responses: Vec<Result<Value, String>>,
pub execute_responses: Vec<Result<(), String>>,
pub mocks: Vec<Value>,
}
#[cfg(test)]
@ -18,6 +22,7 @@ impl MockState {
captured_queries: Default::default(),
query_responses: Default::default(),
execute_responses: Default::default(),
mocks: Default::default(),
}
}
}
@ -44,6 +49,15 @@ impl DatabaseExecutor for MockExecutor {
MOCK_STATE.with(|state| {
let mut s = state.borrow_mut();
s.captured_queries.push(sql.to_string());
if !s.mocks.is_empty() {
if let Some(matches) = parse_and_match_mocks(sql, &s.mocks) {
if !matches.is_empty() {
return Ok(Value::Array(matches));
}
}
}
if s.query_responses.is_empty() {
return Ok(Value::Array(vec![]));
}
@ -76,6 +90,13 @@ impl DatabaseExecutor for MockExecutor {
MOCK_STATE.with(|state| state.borrow().captured_queries.clone())
}
#[cfg(test)]
fn set_mocks(&self, mocks: Vec<Value>) {
MOCK_STATE.with(|state| {
state.borrow_mut().mocks = mocks;
});
}
#[cfg(test)]
fn reset_mocks(&self) {
MOCK_STATE.with(|state| {
@ -83,6 +104,93 @@ impl DatabaseExecutor for MockExecutor {
s.captured_queries.clear();
s.query_responses.clear();
s.execute_responses.clear();
s.mocks.clear();
});
}
}
#[cfg(test)]
/// Test-only helper: shallowly parses a generated SELECT statement and
/// returns the mock rows whose fields satisfy its WHERE clause, letting
/// fixtures simulate database responses without a live connection.
///
/// Parsing is intentionally naive:
/// * Only statements beginning with `SELECT` are handled (`None` otherwise).
/// * The first `FROM [schema.]table` occurrence names the target table,
///   which is compared against each mock's `"type"` field (if present).
/// * The WHERE clause is split on `AND`; each part is read either as
///   `col = value` (alias prefix and quotes stripped) or as `col IS NULL`
///   (recorded with the sentinel value `"null"`).
///
/// Returns `Some(matching mocks)` — possibly empty — or `None` when the SQL
/// cannot be recognized as a SELECT with a FROM clause.
fn parse_and_match_mocks(sql: &str, mocks: &[Value]) -> Option<Vec<Value>> {
    let sql_upper = sql.to_uppercase();
    if !sql_upper.starts_with("SELECT") {
        return None;
    }
    // 1. Extract table name (optionally schema-qualified and/or double-quoted).
    let table_regex = Regex::new(r#"(?i)\s+FROM\s+(?:[a-zA-Z_]\w*\.)?"?([a-zA-Z_]\w*)"?"#).ok()?;
    let table = if let Some(caps) = table_regex.captures(sql) {
        caps.get(1)?.as_str()
    } else {
        return None;
    };
    // 2. Extract WHERE conditions. Byte offsets found on the uppercased copy
    //    are reused on the original string — assumes the SQL is ASCII, where
    //    uppercasing preserves length (TODO confirm; non-ASCII would skew).
    let mut conditions = Vec::new();
    if let Some(where_idx) = sql_upper.find(" WHERE ") {
        // The WHERE clause ends at ORDER BY or LIMIT, whichever comes first.
        let mut where_end = sql_upper.find(" ORDER BY ").unwrap_or(sql.len());
        if let Some(limit_idx) = sql_upper.find(" LIMIT ") {
            if limit_idx < where_end {
                where_end = limit_idx;
            }
        }
        let where_clause = &sql[where_idx + 7..where_end];
        let and_regex = Regex::new(r"(?i)\s+AND\s+").ok()?;
        let parts = and_regex.split(where_clause);
        for part in parts {
            // NOTE(review): `find('=')` also fires inside `!=`, `<=` and `>=`,
            // so such predicates are parsed as equality with a mangled column
            // name (e.g. "a !") that never matches a mock field — confirm this
            // exclusion is acceptable for the fixtures using these mocks.
            if let Some(eq_idx) = part.find('=') {
                // Keep only the bare column name: drop any `alias.` prefix
                // and surrounding double quotes.
                let left = part[..eq_idx]
                    .trim()
                    .split('.')
                    .last()
                    .unwrap_or("")
                    .trim_matches('"');
                // Strip single quotes so string literals compare textually.
                let right = part[eq_idx + 1..].trim().trim_matches('\'');
                conditions.push((left.to_string(), right.to_string()));
            } else if part.to_uppercase().contains(" IS NULL") {
                let left = part[..part.to_uppercase().find(" IS NULL").unwrap()]
                    .trim()
                    .split('.')
                    .last()
                    .unwrap_or("")
                    .replace('"', ""); // Remove quotes explicitly
                conditions.push((left, "null".to_string()));
            }
        }
    }
    // 3. Find the mocks whose fields satisfy every extracted condition.
    let mut matches = Vec::new();
    for mock in mocks {
        if let Some(mock_obj) = mock.as_object() {
            // A mock carrying a "type" field must match the queried table;
            // mocks without one are matched on conditions alone.
            if let Some(t) = mock_obj.get("type") {
                if t.as_str() != Some(table) {
                    continue;
                }
            }
            let mut matches_all = true;
            for (k, v) in &conditions {
                // Compare on stringified values; a missing field or a
                // non-scalar (array/object) fails the condition outright.
                let mock_val_str = match mock_obj.get(k) {
                    Some(Value::String(s)) => s.clone(),
                    Some(Value::Number(n)) => n.to_string(),
                    Some(Value::Bool(b)) => b.to_string(),
                    Some(Value::Null) => "null".to_string(),
                    _ => {
                        matches_all = false;
                        break;
                    }
                };
                if mock_val_str != *v {
                    matches_all = false;
                    break;
                }
            }
            if matches_all {
                matches.push(mock.clone());
            }
        }
    }
    Some(matches)
}

View File

@ -25,4 +25,7 @@ pub trait DatabaseExecutor: Send + Sync {
#[cfg(test)]
fn reset_mocks(&self);
#[cfg(test)]
fn set_mocks(&self, mocks: Vec<Value>);
}

View File

@ -396,6 +396,19 @@ impl Merger {
let mut lookup_complete = false;
if !entity_type.lookup_fields.is_empty() {
lookup_complete = true;
for column in &entity_type.lookup_fields {
match entity_fields.get(column) {
Some(Value::Null) | None => {
lookup_complete = false;
break;
}
Some(Value::String(s)) if s.is_empty() => {
lookup_complete = false;
break;
}
_ => {}
}
}
}
if id_val.is_none() && !lookup_complete {
@ -433,14 +446,10 @@ impl Merger {
let val = entity_fields.get(column).unwrap_or(&Value::Null);
if column == "type" {
lookup_predicates.push(format!("t1.\"{}\" = {}", column, Self::quote_literal(val)));
} else {
if val.as_str() == Some("") || val.is_null() {
lookup_predicates.push(format!("\"{}\" IS NULL", column));
} else {
lookup_predicates.push(format!("\"{}\" = {}", column, Self::quote_literal(val)));
}
}
}
format!("WHERE {}", lookup_predicates.join(" AND "))
} else {
return Ok(None);

View File

@ -202,168 +202,38 @@ impl SqlCompiler {
is_stem_query: bool,
depth: usize,
) -> Result<(String, String), String> {
// We are compiling a query block for an Entity.
let mut select_args = Vec::new();
// Mapping table hierarchy to aliases, e.g., ["person", "user", "organization", "entity"]
let local_ctx = format!("{}_{}", parent_alias, prop_name.unwrap_or("obj"));
// e.g., parent_t1_contact -> we'll use t1 for the first of this block, t2 for the second, etc.
// Actually, local_ctx can just be exactly that prop's unique path.
let mut table_aliases = std::collections::HashMap::new();
let mut from_clauses = Vec::new();
for (i, table_name) in type_def.hierarchy.iter().enumerate() {
let alias = format!("{}_t{}", local_ctx, i + 1);
table_aliases.insert(table_name.clone(), alias.clone());
// 1. Build FROM clauses and table aliases
let (mut table_aliases, from_clauses) = self.build_hierarchy_from_clauses(type_def, &local_ctx);
if i == 0 {
from_clauses.push(format!("agreego.{} {}", table_name, alias));
} else {
// Join to previous
let prev_alias = format!("{}_t{}", local_ctx, i);
from_clauses.push(format!(
"JOIN agreego.{} {} ON {}.id = {}.id",
table_name, alias, alias, prev_alias
));
}
}
// Now, let's map properties from the schema to the correct table alias using grouped_fields
// grouped_fields is { "person": ["first_name", ...], "user": ["password"], ... }
let grouped_fields = type_def.grouped_fields.as_ref().and_then(|v| v.as_object());
let merged_props = self.get_merged_properties(schema);
for (prop_key, prop_schema) in &merged_props {
// Find which table owns this property
// Find which table owns this property
let mut owner_alias = table_aliases
.get("entity")
.cloned()
.unwrap_or_else(|| format!("{}_t_err", parent_alias));
if let Some(gf) = grouped_fields {
for (t_name, fields_val) in gf {
if let Some(fields_arr) = fields_val.as_array() {
if fields_arr.iter().any(|v| v.as_str() == Some(prop_key)) {
owner_alias = table_aliases
.get(t_name)
.cloned()
.unwrap_or_else(|| parent_alias.to_string());
break;
}
}
}
}
// Now we know `owner_alias`, e.g., `parent_t1` or `parent_t3`.
// Walk the property to get its SQL value
let (val_sql, val_type) = self.walk_schema(
prop_schema,
&owner_alias,
Some(prop_key),
// 2. Map properties and build jsonb_build_object args
let select_args = self.map_properties_to_aliases(
schema,
type_def,
&table_aliases,
parent_alias,
filter_keys,
is_stem_query,
depth + 1,
depth,
)?;
if val_type == "abort" {
continue;
}
select_args.push(format!("'{}', {}", prop_key, val_sql));
}
let jsonb_obj_sql = if select_args.is_empty() {
"jsonb_build_object()".to_string()
} else {
format!("jsonb_build_object({})", select_args.join(", "))
};
let base_alias = table_aliases
.get(&type_def.name)
.cloned()
.unwrap_or_else(|| "err".to_string());
// 3. Build WHERE clauses
let mut where_clauses = self.build_filter_where_clauses(
schema,
type_def,
&table_aliases,
parent_alias,
prop_name,
filter_keys,
)?;
let mut where_clauses = Vec::new();
where_clauses.push(format!("NOT {}.archived", base_alias));
// Filter Mapping - Only append filters if this is the ROOT table query (i.e. parent_alias is "t1")
// Because cue.filters operates strictly on top-level root properties right now.
if parent_alias == "t1" {
for (i, filter_key) in filter_keys.iter().enumerate() {
// Find which table owns this filter key
let mut filter_alias = base_alias.clone(); // default to root table (e.g. t3 entity)
if let Some(gf) = type_def.grouped_fields.as_ref().and_then(|v| v.as_object()) {
for (t_name, fields_val) in gf {
if let Some(fields_arr) = fields_val.as_array() {
if fields_arr.iter().any(|v| v.as_str() == Some(filter_key)) {
filter_alias = table_aliases
.get(t_name)
.cloned()
.unwrap_or_else(|| base_alias.clone());
break;
}
}
}
}
let mut is_ilike = false;
let mut cast = "";
// Use PostgreSQL column type metadata for exact argument casting
if let Some(field_types) = type_def.field_types.as_ref().and_then(|v| v.as_object()) {
if let Some(pg_type_val) = field_types.get(filter_key) {
if let Some(pg_type) = pg_type_val.as_str() {
if pg_type == "uuid" {
cast = "::uuid";
} else if pg_type == "boolean" || pg_type == "bool" {
cast = "::boolean";
} else if pg_type.contains("timestamp")
|| pg_type == "timestamptz"
|| pg_type == "date"
{
cast = "::timestamptz";
} else if pg_type == "numeric"
|| pg_type.contains("int")
|| pg_type == "real"
|| pg_type == "double precision"
{
cast = "::numeric";
} else if pg_type == "text" || pg_type.contains("char") {
// Determine if this is an enum in the schema locally to avoid ILIKE on strict enums
let mut is_enum = false;
if let Some(props) = &schema.obj.properties {
if let Some(ps) = props.get(filter_key) {
is_enum = ps.obj.enum_.is_some();
}
}
if !is_enum {
is_ilike = true;
}
}
}
}
}
// Add to WHERE clause using 1-indexed args pointer: $1, $2
if is_ilike {
let param = format!("${}#>>'{{}}'", i + 1);
where_clauses.push(format!("{}.{} ILIKE {}", filter_alias, filter_key, param));
} else {
let param = format!("(${}#>>'{{}}'){}", i + 1, cast);
where_clauses.push(format!("{}.{} = {}", filter_alias, filter_key, param));
}
}
}
// Resolve FK relationship constraint if this is a nested subquery
if let Some(_prop) = prop_name {
// MOCK relation resolution (will integrate with `get_entity_relation` properly)
// By default assume FK is parent_id on child
where_clauses.push(format!("{}.parent_id = {}.id", base_alias, parent_alias));
}
// Wrap the object in the final array or object SELECT
let selection = if is_array {
format!("COALESCE(jsonb_agg({}), '[]'::jsonb)", jsonb_obj_sql)
} else {
@ -387,6 +257,219 @@ impl SqlCompiler {
))
}
/// Builds the FROM/JOIN chain for a type's table hierarchy.
///
/// Every table in `type_def.hierarchy` receives a deterministic alias of the
/// form `{local_ctx}_t{n}` (1-indexed). The first table becomes the base FROM
/// entry; each subsequent table is inner-joined to its predecessor on `id`.
///
/// Returns the table-name -> alias map together with the ordered clause list.
fn build_hierarchy_from_clauses(
    &self,
    type_def: &crate::database::r#type::Type,
    local_ctx: &str,
) -> (std::collections::HashMap<String, String>, Vec<String>) {
    let mut table_aliases = std::collections::HashMap::new();
    let mut from_clauses = Vec::new();
    let mut prev_alias: Option<String> = None;
    for (idx, table_name) in type_def.hierarchy.iter().enumerate() {
        let alias = format!("{}_t{}", local_ctx, idx + 1);
        let clause = match &prev_alias {
            // Base table of the hierarchy: plain FROM entry.
            None => format!("agreego.{} {}", table_name, alias),
            // Inherited table: join back to the previous level on `id`.
            Some(prev) => format!(
                "JOIN agreego.{} {} ON {}.id = {}.id",
                table_name, alias, alias, prev
            ),
        };
        from_clauses.push(clause);
        table_aliases.insert(table_name.clone(), alias.clone());
        prev_alias = Some(alias);
    }
    (table_aliases, from_clauses)
}
/// Maps every merged schema property onto its owning table alias and emits
/// the `'key', <sql>` argument pairs for `jsonb_build_object`.
///
/// Ownership is resolved through `type_def.grouped_fields` (table name ->
/// list of column names); properties not claimed by any table fall back to
/// the `entity` base alias. Properties whose walk yields the sentinel type
/// "abort" are omitted from the selection.
fn map_properties_to_aliases(
    &self,
    schema: &crate::database::schema::Schema,
    type_def: &crate::database::r#type::Type,
    table_aliases: &std::collections::HashMap<String, String>,
    parent_alias: &str,
    filter_keys: &[String],
    is_stem_query: bool,
    depth: usize,
) -> Result<Vec<String>, String> {
    let grouped = type_def.grouped_fields.as_ref().and_then(|v| v.as_object());
    let mut select_args = Vec::new();
    for (prop_key, prop_schema) in &self.get_merged_properties(schema) {
        // First table in grouped_fields that lists this property owns it.
        let owning_table = grouped.and_then(|gf| {
            gf.iter().find(|(_, cols)| {
                cols.as_array()
                    .map_or(false, |arr| arr.iter().any(|v| v.as_str() == Some(prop_key)))
            })
        });
        let owner_alias = match owning_table {
            Some((t_name, _)) => table_aliases
                .get(t_name)
                .cloned()
                .unwrap_or_else(|| parent_alias.to_string()),
            // Unclaimed properties live on the shared `entity` base table.
            None => table_aliases
                .get("entity")
                .cloned()
                .unwrap_or_else(|| format!("{}_t_err", parent_alias)),
        };
        let (val_sql, val_type) = self.walk_schema(
            prop_schema,
            &owner_alias,
            Some(prop_key),
            filter_keys,
            is_stem_query,
            depth + 1,
        )?;
        // "abort" signals the walker chose to drop this property entirely.
        if val_type != "abort" {
            select_args.push(format!("'{}', {}", prop_key, val_sql));
        }
    }
    Ok(select_args)
}
/// Builds the WHERE-clause predicates for one query block: the implicit
/// `NOT archived` guard, user-supplied filter predicates (root level only),
/// and — for nested subqueries — the parent-child FK constraint.
///
/// Each entry in `filter_keys` is `"field:$op"` (e.g. `"name:$eq"`) and is
/// positionally bound to `$1..$n` parameters in the same order, unwrapped
/// from JSONB via `#>>'{}'` and cast based on the column's Postgres type.
/// Text (non-enum) columns use ILIKE / NOT ILIKE for `$eq` / `$ne`.
fn build_filter_where_clauses(
    &self,
    schema: &crate::database::schema::Schema,
    type_def: &crate::database::r#type::Type,
    table_aliases: &std::collections::HashMap<String, String>,
    parent_alias: &str,
    prop_name: Option<&str>,
    filter_keys: &[String],
) -> Result<Vec<String>, String> {
    // Alias of this type's own (leaf) table in the hierarchy.
    let base_alias = table_aliases
        .get(&type_def.name)
        .cloned()
        .unwrap_or_else(|| "err".to_string());
    let mut where_clauses = Vec::new();
    // Soft-delete guard: always exclude archived rows.
    where_clauses.push(format!("NOT {}.archived", base_alias));
    // Filters only apply to the ROOT query ("t1"); nested blocks ignore them.
    if parent_alias == "t1" {
        for (i, filter_key) in filter_keys.iter().enumerate() {
            // Keys are encoded as "field:$op"; a bare key defaults to $eq.
            let mut parts = filter_key.split(':');
            let field_name = parts.next().unwrap_or(filter_key);
            let op = parts.next().unwrap_or("$eq");
            // Resolve which hierarchy table owns this column; default to
            // the base table when grouped_fields doesn't claim it.
            let mut filter_alias = base_alias.clone();
            if let Some(gf) = type_def.grouped_fields.as_ref().and_then(|v| v.as_object()) {
                for (t_name, fields_val) in gf {
                    if let Some(fields_arr) = fields_val.as_array() {
                        if fields_arr.iter().any(|v| v.as_str() == Some(field_name)) {
                            filter_alias = table_aliases
                                .get(t_name)
                                .cloned()
                                .unwrap_or_else(|| base_alias.clone());
                            break;
                        }
                    }
                }
            }
            // Derive the parameter cast (and ILIKE eligibility) from the
            // column's Postgres type metadata.
            let mut is_ilike = false;
            let mut cast = "";
            if let Some(field_types) = type_def.field_types.as_ref().and_then(|v| v.as_object()) {
                if let Some(pg_type_val) = field_types.get(field_name) {
                    if let Some(pg_type) = pg_type_val.as_str() {
                        if pg_type == "uuid" {
                            cast = "::uuid";
                        } else if pg_type == "boolean" || pg_type == "bool" {
                            cast = "::boolean";
                        } else if pg_type.contains("timestamp")
                            || pg_type == "timestamptz"
                            || pg_type == "date"
                        {
                            cast = "::timestamptz";
                        } else if pg_type == "numeric"
                            || pg_type.contains("int")
                            || pg_type == "real"
                            || pg_type == "double precision"
                        {
                            cast = "::numeric";
                        } else if pg_type == "text" || pg_type.contains("char") {
                            // Enum-backed text columns must compare strictly,
                            // so ILIKE is reserved for free-form strings.
                            let mut is_enum = false;
                            if let Some(props) = &schema.obj.properties {
                                if let Some(ps) = props.get(field_name) {
                                    is_enum = ps.obj.enum_.is_some();
                                }
                            }
                            if !is_enum {
                                is_ilike = true;
                            }
                        }
                    }
                }
            }
            // Parameters are 1-indexed and unwrapped from JSONB as text.
            let param_index = i + 1;
            let p_val = format!("${}#>>'{{}}'", param_index);
            if op == "$in" || op == "$nin" {
                // Set membership: explode the JSON array parameter into a
                // typed value set via jsonb_array_elements_text.
                let sql_op = if op == "$in" { "IN" } else { "NOT IN" };
                let subquery = format!(
                    "(SELECT value{} FROM jsonb_array_elements_text(({})::jsonb))",
                    cast, p_val
                );
                where_clauses.push(format!(
                    "{}.{} {} {}",
                    filter_alias, field_name, sql_op, subquery
                ));
            } else {
                // Scalar comparison: map the $op to its SQL operator,
                // substituting (NOT) ILIKE for text equality/inequality.
                let sql_op = match op {
                    "$eq" => {
                        if is_ilike {
                            "ILIKE"
                        } else {
                            "="
                        }
                    }
                    "$ne" => {
                        if is_ilike {
                            "NOT ILIKE"
                        } else {
                            "!="
                        }
                    }
                    "$gt" => ">",
                    "$gte" => ">=",
                    "$lt" => "<",
                    "$lte" => "<=",
                    // Unknown operators fall back to equality semantics.
                    _ => {
                        if is_ilike {
                            "ILIKE"
                        } else {
                            "="
                        }
                    }
                };
                // ILIKE compares raw text, so the cast is skipped there.
                let param_sql = if is_ilike && (op == "$eq" || op == "$ne") {
                    p_val
                } else {
                    format!("({}){}", p_val, cast)
                };
                where_clauses.push(format!(
                    "{}.{} {} {}",
                    filter_alias, field_name, sql_op, param_sql
                ));
            }
        }
    }
    // Nested subquery: constrain children to the parent row.
    // NOTE(review): FK is assumed to be `parent_id` on the child — confirm
    // once relation metadata resolution is wired in.
    if let Some(_prop) = prop_name {
        where_clauses.push(format!("{}.parent_id = {}.id", base_alias, parent_alias));
    }
    Ok(where_clauses)
}
fn compile_inline_object(
&self,
props: &std::collections::BTreeMap<String, std::sync::Arc<crate::database::schema::Schema>>,

View File

@ -24,61 +24,105 @@ impl Queryer {
stem_opt: Option<&str>,
filters: Option<&serde_json::Value>,
) -> crate::drop::Drop {
let filters_map: Option<&serde_json::Map<String, serde_json::Value>> =
filters.and_then(|f| f.as_object());
let filters_map = filters.and_then(|f| f.as_object());
// Generate Permutation Cache Key: schema_id + sorted filter keys
let mut filter_keys: Vec<String> = Vec::new();
if let Some(fm) = filters_map {
for key in fm.keys() {
filter_keys.push(key.clone());
}
}
filter_keys.sort();
let stem_key = stem_opt.unwrap_or("/");
let cache_key = format!("{}(Stem:{}):{}", schema_id, stem_key, filter_keys.join(","));
let sql = if let Some(cached_sql) = self.cache.get(&cache_key) {
cached_sql.value().clone()
} else {
// Compile the massive base SQL string
let compiler = compiler::SqlCompiler::new(self.db.clone());
match compiler.compile(schema_id, stem_opt, &filter_keys) {
Ok(compiled_sql) => {
self.cache.insert(cache_key.clone(), compiled_sql.clone());
compiled_sql
}
Err(e) => {
// 1. Process filters into structured $op keys and linear values
let (filter_keys, args) = match self.parse_filter_entries(filters_map) {
Ok(res) => res,
Err(msg) => {
return crate::drop::Drop::with_errors(vec![crate::drop::Error {
code: "QUERY_COMPILATION_FAILED".to_string(),
message: e,
code: "FILTER_PARSE_FAILED".to_string(),
message: msg,
details: crate::drop::ErrorDetails {
path: schema_id.to_string(),
},
}]);
}
}
};
// 2. Prepare the execution arguments from the filters
let mut args: Vec<serde_json::Value> = Vec::new();
let stem_key = stem_opt.unwrap_or("/");
let cache_key = format!("{}(Stem:{}):{}", schema_id, stem_key, filter_keys.join(","));
if let Some(fm) = filters_map {
for (_i, key) in filter_keys.iter().enumerate() {
if let Some(val) = fm.get(key) {
args.push(val.clone());
}
}
}
// 2. Fetch from cache or compile
let sql = match self.get_or_compile_sql(&cache_key, schema_id, stem_opt, &filter_keys) {
Ok(sql) => sql,
Err(drop) => return drop,
};
// 3. Execute via Database Executor
match self.db.query(&sql, Some(&args)) {
self.execute_sql(schema_id, &sql, &args)
}
/// Flattens a filters object into parallel, deterministically ordered lists
/// of `"field:$op"` keys and their bound argument values.
///
/// Every field's value must itself be an object of `$`-prefixed operators
/// (`$eq`, `$in`, ...); anything else is rejected with a descriptive error.
/// Entries are sorted by key so equivalent filter sets share one cache slot.
fn parse_filter_entries(
    &self,
    filters_map: Option<&serde_json::Map<String, serde_json::Value>>,
) -> Result<(Vec<String>, Vec<serde_json::Value>), String> {
    let fm = match filters_map {
        Some(fm) => fm,
        // No filters at all: nothing to bind.
        None => return Ok((Vec::new(), Vec::new())),
    };
    let mut entries: Vec<(String, serde_json::Value)> = Vec::new();
    for (field, spec) in fm {
        let ops = match spec.as_object() {
            Some(ops) => ops,
            None => {
                return Err(format!(
                    "Filter for field '{}' must be an object with operators like $eq, $in, etc.",
                    field
                ));
            }
        };
        for (op, op_val) in ops {
            if !op.starts_with('$') {
                return Err(format!("Filter operator must start with '$', got: {}", op));
            }
            entries.push((format!("{}:{}", field, op), op_val.clone()));
        }
    }
    // Sort by the composite key, then split keys and values apart while
    // preserving their pairwise order.
    entries.sort_by(|a, b| a.0.cmp(&b.0));
    Ok(entries.into_iter().unzip())
}
fn get_or_compile_sql(
&self,
cache_key: &str,
schema_id: &str,
stem_opt: Option<&str>,
filter_keys: &[String],
) -> Result<String, crate::drop::Drop> {
if let Some(cached_sql) = self.cache.get(cache_key) {
return Ok(cached_sql.value().clone());
}
let compiler = compiler::SqlCompiler::new(self.db.clone());
match compiler.compile(schema_id, stem_opt, filter_keys) {
Ok(compiled_sql) => {
self
.cache
.insert(cache_key.to_string(), compiled_sql.clone());
Ok(compiled_sql)
}
Err(e) => Err(crate::drop::Drop::with_errors(vec![crate::drop::Error {
code: "QUERY_COMPILATION_FAILED".to_string(),
message: e,
details: crate::drop::ErrorDetails {
path: schema_id.to_string(),
},
}])),
}
}
fn execute_sql(
&self,
schema_id: &str,
sql: &str,
args: &[serde_json::Value],
) -> crate::drop::Drop {
match self.db.query(sql, Some(args)) {
Ok(serde_json::Value::Array(table)) => {
if table.is_empty() {
crate::drop::Drop::success_with_val(serde_json::Value::Null)
} else {
// We expect the query to return a single JSONB column, already unpacked from row[0]
crate::drop::Drop::success_with_val(table.first().unwrap().clone())
}
}

View File

@ -1463,12 +1463,6 @@ fn test_queryer_0_8() {
crate::tests::runner::run_test_case(&path, 0, 8).unwrap();
}
#[test]
fn test_queryer_0_9() {
let path = format!("{}/fixtures/queryer.json", env!("CARGO_MANIFEST_DIR"));
crate::tests::runner::run_test_case(&path, 0, 9).unwrap();
}
#[test]
fn test_not_0_0() {
let path = format!("{}/fixtures/not.json", env!("CARGO_MANIFEST_DIR"));
@ -8536,3 +8530,21 @@ fn test_merger_0_4() {
let path = format!("{}/fixtures/merger.json", env!("CARGO_MANIFEST_DIR"));
crate::tests::runner::run_test_case(&path, 0, 4).unwrap();
}
#[test]
fn test_merger_0_5() {
    // Fixture-driven test: merger suite 0, case 5.
    let fixture = format!("{}/fixtures/merger.json", env!("CARGO_MANIFEST_DIR"));
    crate::tests::runner::run_test_case(&fixture, 0, 5).unwrap();
}
#[test]
fn test_merger_0_6() {
    // Fixture-driven test: merger suite 0, case 6.
    let fixture = format!("{}/fixtures/merger.json", env!("CARGO_MANIFEST_DIR"));
    crate::tests::runner::run_test_case(&fixture, 0, 6).unwrap();
}
#[test]
fn test_merger_0_7() {
    // Fixture-driven test: merger suite 0, case 7.
    let fixture = format!("{}/fixtures/merger.json", env!("CARGO_MANIFEST_DIR"));
    crate::tests::runner::run_test_case(&fixture, 0, 7).unwrap();
}

View File

@ -95,17 +95,6 @@ pub fn run_test_case(path: &str, suite_idx: usize, case_idx: usize) -> Result<()
let mut failures = Vec::<String>::new();
// 4. Run Tests
// Provide fallback for legacy expectations if `expect` block was missing despite migration script
let _expected_success = test
.expect
.as_ref()
.map(|e| e.success)
.unwrap_or(test.valid.unwrap_or(false));
let _expected_errors = test
.expect
.as_ref()
.and_then(|e| e.errors.clone())
.unwrap_or(test.expect_errors.clone().unwrap_or(vec![]));
match test.action.as_str() {
"validate" => {

View File

@ -31,10 +31,6 @@ pub struct TestCase {
pub mocks: Option<serde_json::Value>,
pub expect: Option<ExpectBlock>,
// Legacy support for older tests to avoid migrating them all instantly
pub valid: Option<bool>,
pub expect_errors: Option<Vec<serde_json::Value>>,
}
fn default_action() -> String {
@ -59,18 +55,7 @@ impl TestCase {
let validator = Validator::new(db);
let expected_success = self
.expect
.as_ref()
.map(|e| e.success)
.unwrap_or(self.valid.unwrap_or(false));
// _expected_errors is preserved for future diffing if needed
let _expected_errors = self
.expect
.as_ref()
.and_then(|e| e.errors.clone())
.unwrap_or(self.expect_errors.clone().unwrap_or(vec![]));
let expected_success = self.expect.as_ref().map(|e| e.success).unwrap_or(false);
let schema_id = &self.schema_id;
if !validator.db.schemas.contains_key(schema_id) {
@ -102,6 +87,12 @@ impl TestCase {
}
pub fn run_merge(&self, db: Arc<Database>) -> Result<(), String> {
if let Some(mocks) = &self.mocks {
if let Some(arr) = mocks.as_array() {
db.executor.set_mocks(arr.clone());
}
}
use crate::merger::Merger;
let merger = Merger::new(db.clone());
@ -134,6 +125,12 @@ impl TestCase {
}
pub fn run_query(&self, db: Arc<Database>) -> Result<(), String> {
if let Some(mocks) = &self.mocks {
if let Some(arr) = mocks.as_array() {
db.executor.set_mocks(arr.clone());
}
}
use crate::queryer::Queryer;
let queryer = Queryer::new(db.clone());