query test progress

This commit is contained in:
2026-03-10 18:25:29 -04:00
parent bb263190f6
commit 1c08a8f2b8
20 changed files with 1949 additions and 225 deletions

111
src/database/executor.rs Normal file
View File

@ -0,0 +1,111 @@
use pgrx::prelude::*;
use serde_json::Value;
/// An abstraction over database execution to allow for isolated unit testing
/// without a live Postgres SPI connection.
///
/// Implementations must be `Send + Sync` because `Database` stores a boxed
/// executor that may be shared across threads.
pub trait DatabaseExecutor: Send + Sync {
    /// Executes a query expecting a single JSONB return, representing rows.
    ///
    /// `args` are positional JSONB parameters — presumably bound as `$1`,
    /// `$2`, … in `sql`; confirm against callers. The result is a JSON array
    /// with one element per row.
    fn query(&self, sql: &str, args: Option<&[Value]>) -> Result<Value, String>;
    /// Executes an operation (INSERT, UPDATE, DELETE, or pg_notify) that does not return rows.
    fn execute(&self, sql: &str, args: Option<&[Value]>) -> Result<(), String>;
    /// Returns the current authenticated user's ID
    fn auth_user_id(&self) -> Result<String, String>;
    /// Returns the current transaction timestamp
    /// (note: `SpiExecutor` actually returns `clock_timestamp()`, i.e.
    /// wall-clock time, not `transaction_timestamp()` — confirm intent).
    fn timestamp(&self) -> Result<String, String>;
}
/// The production executor that wraps `pgrx::spi::Spi`.
pub struct SpiExecutor;
impl SpiExecutor {
    /// Creates a new (stateless) SPI-backed executor.
    pub fn new() -> Self {
        Self {}
    }
}
/// `Default` mirrors `new()` so the executor can be constructed via
/// `Default::default()` and satisfies clippy's `new_without_default` lint.
impl Default for SpiExecutor {
    fn default() -> Self {
        Self::new()
    }
}
impl DatabaseExecutor for SpiExecutor {
    /// Runs `sql` through SPI, binding each JSON argument as a JSONB
    /// parameter, and collects column 1 of every returned row into a JSON
    /// array.
    fn query(&self, sql: &str, args: Option<&[Value]>) -> Result<Value, String> {
        // Convert JSON args straight into SPI datums; the former intermediate
        // `Vec<JsonB>` pass-through loop added nothing.
        let mut args_with_oid: Vec<pgrx::datum::DatumWithOid> = Vec::new();
        if let Some(params) = args {
            args_with_oid.reserve(params.len());
            for val in params {
                args_with_oid.push(pgrx::datum::DatumWithOid::from(pgrx::JsonB(val.clone())));
            }
        }
        Spi::connect(|client| {
            // BUG FIX: the second parameter of `select` is the SPI row LIMIT,
            // not the argument count. Passing `args_with_oid.len()` silently
            // truncated result sets to the number of bound arguments; `None`
            // means "no limit".
            match client.select(sql, None, &args_with_oid) {
                Ok(tup_table) => {
                    let mut results = Vec::new();
                    for row in tup_table {
                        // Each row is expected to expose a single JSONB column.
                        if let Ok(Some(jsonb)) = row.get::<pgrx::JsonB>(1) {
                            results.push(jsonb.0);
                        }
                    }
                    Ok(Value::Array(results))
                }
                Err(e) => Err(format!("SPI Query Fetch Failure: {}", e)),
            }
        })
    }
    /// Runs a non-returning statement (INSERT/UPDATE/DELETE/pg_notify) with
    /// the same JSONB argument binding as [`Self::query`].
    fn execute(&self, sql: &str, args: Option<&[Value]>) -> Result<(), String> {
        let mut args_with_oid: Vec<pgrx::datum::DatumWithOid> = Vec::new();
        if let Some(params) = args {
            args_with_oid.reserve(params.len());
            for val in params {
                args_with_oid.push(pgrx::datum::DatumWithOid::from(pgrx::JsonB(val.clone())));
            }
        }
        // As in `query`, the limit parameter must be `None` (no row cap),
        // not the argument count.
        Spi::connect_mut(|client| match client.update(sql, None, &args_with_oid) {
            Ok(_) => Ok(()),
            Err(e) => Err(format!("SPI Execution Failure: {}", e)),
        })
    }
    /// Reads the `auth.user_id` session setting, falling back to the all-f's
    /// sentinel UUID when the setting is absent.
    fn auth_user_id(&self) -> Result<String, String> {
        Spi::connect(|client| {
            let mut tup_table = client
                .select(
                    "SELECT COALESCE(current_setting('auth.user_id', true), 'ffffffff-ffff-ffff-ffff-ffffffffffff')",
                    None,
                    &[],
                )
                .map_err(|e| format!("SPI Select Error: {}", e))?;
            let row = tup_table
                .next()
                .ok_or("No user id setting returned from context".to_string())?;
            let user_id: Option<String> = row.get(1).map_err(|e| e.to_string())?;
            user_id.ok_or("Missing user_id".to_string())
        })
    }
    /// Returns `clock_timestamp()` (wall-clock time at call, not the
    /// transaction start time) rendered as text.
    fn timestamp(&self) -> Result<String, String> {
        Spi::connect(|client| {
            let mut tup_table = client
                .select("SELECT clock_timestamp()::text", None, &[])
                .map_err(|e| format!("SPI Select Error: {}", e))?;
            let row = tup_table
                .next()
                .ok_or("No clock timestamp returned".to_string())?;
            let timestamp: Option<String> = row.get(1).map_err(|e| e.to_string())?;
            timestamp.ok_or("Missing timestamp".to_string())
        })
    }
}

View File

@ -1,23 +1,30 @@
pub mod r#enum;
pub mod executor;
pub mod formats;
pub mod page;
pub mod punc;
pub mod relation;
pub mod schema;
pub mod r#type;
use crate::database::r#enum::Enum;
use crate::database::punc::Punc;
use crate::database::executor::{DatabaseExecutor, SpiExecutor};
use crate::database::punc::{Punc, Stem};
use crate::database::relation::Relation;
use crate::database::schema::Schema;
use crate::database::r#type::Type;
use serde_json::Value;
use std::collections::{HashMap, HashSet};
/// In-memory registry of the full database schema graph, plus the pluggable
/// executor used for all SQL interaction.
pub struct Database {
    /// Enum definitions keyed by name.
    pub enums: HashMap<String, Enum>,
    /// Entity type definitions keyed by name.
    pub types: HashMap<String, Type>,
    /// Punc definitions keyed by name.
    pub puncs: HashMap<String, Punc>,
    /// FK relations keyed by constraint name (see the `relations` loader).
    pub relations: HashMap<String, Relation>,
    /// JSON schemas — presumably keyed by schema id; confirm in collect_schemas.
    pub schemas: HashMap<String, Schema>,
    /// Descendant lists per node — populated by collect_descendants.
    pub descendants: HashMap<String, Vec<String>>,
    /// Graph depth per node — populated by collect_depths.
    pub depths: HashMap<String, usize>,
    /// Defaults to `SpiExecutor`; overridable for tests via `with_executor`.
    pub executor: Box<dyn DatabaseExecutor + Send + Sync>,
}
impl Database {
@ -25,10 +32,12 @@ impl Database {
let mut db = Self {
enums: HashMap::new(),
types: HashMap::new(),
relations: HashMap::new(),
puncs: HashMap::new(),
schemas: HashMap::new(),
descendants: HashMap::new(),
depths: HashMap::new(),
executor: Box::new(SpiExecutor::new()),
};
if let Some(arr) = val.get("enums").and_then(|v| v.as_array()) {
@ -47,6 +56,14 @@ impl Database {
}
}
if let Some(arr) = val.get("relations").and_then(|v| v.as_array()) {
for item in arr {
if let Ok(def) = serde_json::from_value::<Relation>(item.clone()) {
db.relations.insert(def.constraint.clone(), def);
}
}
}
if let Some(arr) = val.get("puncs").and_then(|v| v.as_array()) {
for item in arr {
if let Ok(def) = serde_json::from_value::<Punc>(item.clone()) {
@ -73,12 +90,39 @@ impl Database {
db
}
/// Override the default executor for unit testing
///
/// Builder-style: consumes and returns `self` so it can be chained directly
/// after construction.
pub fn with_executor(mut self, executor: Box<dyn DatabaseExecutor + Send + Sync>) -> Self {
    self.executor = executor;
    self
}
/// Executes a query expecting a single JSONB array return, representing rows.
/// Thin delegation to the configured `DatabaseExecutor`.
pub fn query(&self, sql: &str, args: Option<&[Value]>) -> Result<Value, String> {
    self.executor.query(sql, args)
}
/// Executes an operation (INSERT, UPDATE, DELETE, or pg_notify) that does not return rows.
/// Thin delegation to the configured `DatabaseExecutor`.
pub fn execute(&self, sql: &str, args: Option<&[Value]>) -> Result<(), String> {
    self.executor.execute(sql, args)
}
/// Returns the current authenticated user's ID
/// (as reported by the configured `DatabaseExecutor`).
pub fn auth_user_id(&self) -> Result<String, String> {
    self.executor.auth_user_id()
}
/// Returns the current transaction timestamp
/// (the production executor actually uses `clock_timestamp()` — see
/// `SpiExecutor::timestamp`).
pub fn timestamp(&self) -> Result<String, String> {
    self.executor.timestamp()
}
/// Organizes the graph of the database, compiling regex, format functions, and caching relationships.
///
/// Stems are discovered last because `collect_stems` reads the already
/// populated `schemas` and `relations` maps.
fn compile(&mut self) -> Result<(), String> {
    self.collect_schemas();
    self.collect_depths();
    self.collect_descendants();
    self.compile_schemas();
    self.collect_stems();
    Ok(())
}
@ -184,4 +228,154 @@ impl Database {
}
}
}
/// Computes the entity "stems" for every punc by walking its `<name>.response`
/// schema tree.
///
/// Runs in two phases: discovery needs shared access to `types`, `schemas`,
/// and `relations` while the results must be written back into `self.puncs`
/// mutably, so stems are first accumulated in a side map and applied after.
fn collect_stems(&mut self) {
    let mut stems_by_punc: HashMap<String, Vec<Stem>> = HashMap::new();
    // Idiomatic key iteration (`.keys()`) instead of `for (name, _) in &map`.
    for name in self.puncs.keys() {
        let mut stems = Vec::new();
        let response_id = format!("{}.response", name);
        if let Some(resp_schema) = self.schemas.get(&response_id) {
            Self::discover_stems(
                &self.types,
                &self.schemas,
                &self.relations,
                &response_id,
                resp_schema,
                String::new(),
                None,
                None,
                &mut stems,
            );
        }
        stems_by_punc.insert(name.clone(), stems);
    }
    for (name, stems) in stems_by_punc {
        if let Some(punc) = self.puncs.get_mut(&name) {
            punc.stems = stems;
        }
    }
}
/// Recursively walks a response `Schema` tree and records a `Stem` for every
/// node whose `$ref` chain resolves to a known entity `Type`.
///
/// `current_path` is the JSON-pointer-like path accumulated so far (`""` at
/// the root, rendered as `"/"` in the emitted stem). `parent_type` is the
/// nearest enclosing entity type and `property_name` the key under which this
/// schema was reached; together they drive FK relation-column lookup.
/// Discovered stems are appended to `stems`.
fn discover_stems(
    types: &HashMap<String, Type>,
    schemas: &HashMap<String, Schema>,
    relations: &HashMap<String, Relation>,
    _schema_id: &str,
    schema: &Schema,
    current_path: String,
    parent_type: Option<String>,
    property_name: Option<String>,
    stems: &mut Vec<Stem>,
) {
    let mut is_entity = false;
    let mut entity_type = String::new();
    // Check if this schema resolves to an Entity by following its `$ref`
    // chain through intermediate schemas until a registered Type is found.
    let mut current_ref = schema.obj.r#ref.clone();
    let mut depth = 0;
    while let Some(r) = current_ref {
        if types.contains_key(&r) {
            is_entity = true;
            entity_type = r.clone();
            break;
        }
        if let Some(s) = schemas.get(&r) {
            current_ref = s.obj.r#ref.clone();
        } else {
            break;
        }
        depth += 1;
        // Guard against cyclic $ref chains (prevent infinite loop).
        if depth > 20 {
            break;
        }
    }
    if is_entity {
        // Root-level stems are reported as "/" instead of the empty string.
        let final_path = if current_path.is_empty() {
            "/".to_string()
        } else {
            current_path.clone()
        };
        let mut relation_col = None;
        if let (Some(pt), Some(prop)) = (&parent_type, &property_name) {
            // Conventional FK column name: "<property>_id".
            let expected_col = format!("{}_id", prop);
            let mut found = false;
            // Try to find the exact relation from the database schema,
            // checking both directions between parent and child types.
            for rel in relations.values() {
                if rel.source_type == *pt && rel.destination_type == entity_type {
                    if rel.source_columns.contains(&expected_col) {
                        relation_col = Some(expected_col.clone());
                        found = true;
                        break;
                    }
                } else if rel.source_type == entity_type && rel.destination_type == *pt {
                    if rel.source_columns.contains(&expected_col) {
                        relation_col = Some(expected_col.clone());
                        found = true;
                        break;
                    }
                }
            }
            if !found {
                // Fallback guess if explicit matching fails: assume the
                // conventional column name anyway.
                relation_col = Some(expected_col);
            }
        }
        stems.push(Stem {
            path: final_path,
            r#type: entity_type.clone(),
            relation: relation_col,
        });
    }
    // Pass the new parent downwards: children of an entity node relate to it;
    // otherwise they keep relating to the previous enclosing entity.
    let next_parent = if is_entity {
        Some(entity_type.clone())
    } else {
        parent_type.clone()
    };
    if let Some(props) = &schema.obj.properties {
        for (k, v) in props {
            let next_path = format!(
                "{}/{}",
                if current_path.is_empty() {
                    ""
                } else {
                    &current_path
                },
                k
            );
            Self::discover_stems(
                types,
                schemas,
                relations,
                "",
                v,
                next_path,
                next_parent.clone(),
                Some(k.clone()),
                stems,
            );
        }
    }
    if let Some(items) = &schema.obj.items {
        // Arrays do not extend the path: each element is treated as living at
        // the array's own path, keeping the same property name.
        Self::discover_stems(
            types,
            schemas,
            relations,
            "",
            items,
            current_path.clone(),
            next_parent.clone(),
            property_name.clone(),
            stems,
        );
    }
}
}

View File

@ -2,6 +2,14 @@ use crate::database::page::Page;
use crate::database::schema::Schema;
use serde::{Deserialize, Serialize};
/// A discovered entity reference ("stem") inside a punc's response schema:
/// the JSON path at which an entity appears, its type, and optionally the
/// FK column relating it to its parent entity.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Stem {
    /// JSON-pointer-like path within the response ("/" for the root).
    pub path: String,
    /// Name of the entity type this path resolves to.
    pub r#type: String,
    /// FK column (conventionally "<property>_id") linking this entity to its
    /// parent, when one could be determined.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub relation: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(default)]
pub struct Punc {
@ -17,4 +25,6 @@ pub struct Punc {
pub page: Option<Page>,
#[serde(default)]
pub schemas: Vec<Schema>,
#[serde(default)]
pub stems: Vec<Stem>,
}

12
src/database/relation.rs Normal file
View File

@ -0,0 +1,12 @@
use serde::{Deserialize, Serialize};
/// A foreign-key relation between two entity types, as declared by a
/// database constraint.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(default)]
pub struct Relation {
    /// Constraint name; used as the registry key in `Database.relations`.
    pub constraint: String,
    /// Type owning the FK columns.
    pub source_type: String,
    /// FK columns on the source side (parallel to `destination_columns`).
    pub source_columns: Vec<String>,
    /// Type the FK points at ("entity" marks a polymorphic relation).
    pub destination_type: String,
    /// Referenced columns on the destination side.
    pub destination_columns: Vec<String>,
    /// Disambiguation prefix consulted when multiple relations match.
    pub prefix: Option<String>,
}

View File

@ -23,7 +23,8 @@ pub struct Type {
pub hierarchy: Vec<String>,
#[serde(default)]
pub variations: HashSet<String>,
pub relationship: Option<bool>,
#[serde(default)]
pub relationship: bool,
#[serde(default)]
pub fields: Vec<String>,
pub grouped_fields: Option<Value>,

View File

@ -16,8 +16,8 @@ impl Jspg {
let database_instance = Database::new(database_val);
let database = Arc::new(database_instance);
let validator = Validator::new(database.clone());
let queryer = Queryer::new();
let merger = Merger::new();
let queryer = Queryer::new(database.clone());
let merger = Merger::new(database.clone());
Self {
database,

View File

@ -9,7 +9,6 @@ pub mod merger;
pub mod queryer;
pub mod validator;
use serde_json::json;
use std::sync::{Arc, RwLock};
lazy_static::lazy_static! {
@ -22,7 +21,7 @@ lazy_static::lazy_static! {
}
#[pg_extern(strict)]
pub fn jspg_cache_database(database: JsonB) -> JsonB {
pub fn jspg_setup(database: JsonB) -> JsonB {
let new_jspg = crate::jspg::Jspg::new(&database.0);
let new_arc = Arc::new(new_jspg);
@ -35,10 +34,85 @@ pub fn jspg_cache_database(database: JsonB) -> JsonB {
let drop = crate::drop::Drop::success();
JsonB(serde_json::to_value(drop).unwrap())
}
#[pg_extern]
pub fn jspg_merge(data: JsonB) -> JsonB {
// Try to acquire a read lock to get a clone of the Engine Arc
let engine_opt = {
let lock = GLOBAL_JSPG.read().unwrap();
lock.clone()
};
match engine_opt {
Some(engine) => match engine.merger.merge(data.0) {
Ok(result) => JsonB(result),
Err(e) => {
let error = crate::drop::Error {
code: "MERGE_FAILED".to_string(),
message: e,
details: crate::drop::ErrorDetails {
path: "".to_string(),
},
};
let drop = crate::drop::Drop::with_errors(vec![error]);
JsonB(serde_json::to_value(drop).unwrap())
}
},
None => {
let error = crate::drop::Error {
code: "VALIDATOR_NOT_INITIALIZED".to_string(),
message: "The JSPG database has not been cached yet. Run jspg_setup()".to_string(),
details: crate::drop::ErrorDetails {
path: "".to_string(),
},
};
let drop = crate::drop::Drop::with_errors(vec![error]);
JsonB(serde_json::to_value(drop).unwrap())
}
}
}
#[pg_extern]
pub fn jspg_query(schema_id: &str, stem: Option<&str>, filters: Option<JsonB>) -> JsonB {
let engine_opt = {
let lock = GLOBAL_JSPG.read().unwrap();
lock.clone()
};
match engine_opt {
Some(engine) => match engine
.queryer
.query(schema_id, stem, filters.as_ref().map(|f| &f.0))
{
Ok(res) => JsonB(res),
Err(e) => {
let error = crate::drop::Error {
code: "QUERY_FAILED".to_string(),
message: e,
details: crate::drop::ErrorDetails {
path: schema_id.to_string(),
},
};
JsonB(serde_json::to_value(crate::drop::Drop::with_errors(vec![error])).unwrap())
}
},
None => {
let error = crate::drop::Error {
code: "ENGINE_NOT_INITIALIZED".to_string(),
message: "JSPG extension has not been initialized via jspg_setup".to_string(),
details: crate::drop::ErrorDetails {
path: "".to_string(),
},
};
JsonB(serde_json::to_value(crate::drop::Drop::with_errors(vec![error])).unwrap())
}
}
}
// `mask_json_schema` has been removed as the mask architecture is fully replaced by Spi string queries during DB interactions.
#[pg_extern(strict, parallel_safe)]
pub fn validate_json_schema(schema_id: &str, instance: JsonB) -> JsonB {
pub fn jspg_validate(schema_id: &str, instance: JsonB) -> JsonB {
// 1. Acquire Snapshot
let jspg_arc = {
let lock = GLOBAL_JSPG.read().unwrap();
@ -79,7 +153,7 @@ pub fn validate_json_schema(schema_id: &str, instance: JsonB) -> JsonB {
} else {
let error = crate::drop::Error {
code: "VALIDATOR_NOT_INITIALIZED".to_string(),
message: "The JSPG database has not been cached yet. Run jspg_cache_database()".to_string(),
message: "The JSPG database has not been cached yet. Run jspg_setup()".to_string(),
details: crate::drop::ErrorDetails {
path: "".to_string(),
},
@ -89,42 +163,33 @@ pub fn validate_json_schema(schema_id: &str, instance: JsonB) -> JsonB {
}
}
#[pg_extern(strict, parallel_safe)]
pub fn json_schema_cached(schema_id: &str) -> bool {
if let Some(engine) = GLOBAL_JSPG.read().unwrap().as_ref() {
match engine
.validator
.validate(schema_id, &serde_json::Value::Null)
{
Err(e) if e.code == "SCHEMA_NOT_FOUND" => false,
_ => true,
#[pg_extern]
pub fn jspg_get_punc_stems(punc_name: &str) -> JsonB {
let engine_opt = {
let lock = GLOBAL_JSPG.read().unwrap();
lock.clone()
};
match engine_opt {
Some(engine) => {
if let Some(punc) = engine.database.puncs.get(punc_name) {
JsonB(serde_json::to_value(&punc.stems).unwrap_or(serde_json::Value::Array(vec![])))
} else {
JsonB(serde_json::Value::Array(vec![]))
}
}
} else {
false
None => JsonB(serde_json::Value::Array(vec![])),
}
}
#[pg_extern(strict)]
pub fn clear_json_schemas() -> JsonB {
pub fn jspg_teardown() -> JsonB {
let mut lock = GLOBAL_JSPG.write().unwrap();
*lock = None;
let drop = crate::drop::Drop::success();
JsonB(serde_json::to_value(drop).unwrap())
}
#[pg_extern(strict, parallel_safe)]
pub fn show_json_schemas() -> JsonB {
if let Some(engine) = GLOBAL_JSPG.read().unwrap().as_ref() {
let mut keys = engine.validator.get_schema_ids();
keys.sort();
let drop = crate::drop::Drop::success_with_val(json!(keys));
JsonB(serde_json::to_value(drop).unwrap())
} else {
let drop = crate::drop::Drop::success_with_val(json!([]));
JsonB(serde_json::to_value(drop).unwrap())
}
}
#[cfg(any(test, feature = "pg_test"))]
#[pg_schema]
mod tests {

24
src/merger/cache.rs Normal file
View File

@ -0,0 +1,24 @@
use dashmap::DashMap;
/// A concurrent cache of fully built SQL strings, keyed by an arbitrary
/// cache key.
pub struct StatementCache {
    /// Maps a Cache Key (String) -> SQL String (String)
    statements: DashMap<String, String>,
}
impl StatementCache {
    /// Creates an empty cache. The capacity argument is currently unused
    /// because `DashMap` grows on demand.
    pub fn new(_max_capacity: u64) -> Self {
        let statements = DashMap::new();
        Self { statements }
    }
    /// Retrieve an existing statement name by key, or None if it missed
    pub fn get(&self, key: &str) -> Option<String> {
        self.statements.get(key).map(|entry| entry.value().clone())
    }
    /// Insert a completely verified/compiled statement string into the cache
    pub fn insert(&self, key: String, sql: String) {
        self.statements.insert(key, sql);
    }
}

View File

@ -1,15 +1,737 @@
pub struct Merger {
// To be implemented
}
//! The `merger` module handles executing Postgres SPI directives dynamically based on JSON payloads
//! using the structurally isolated schema rules provided by the `Database` registry.
impl Default for Merger {
fn default() -> Self {
Self::new()
}
pub mod cache;
use crate::database::Database;
use serde_json::Value;
use std::sync::Arc;
pub struct Merger {
pub db: Arc<Database>,
pub cache: cache::StatementCache,
}
impl Merger {
pub fn new() -> Self {
Self {}
pub fn new(db: Arc<Database>) -> Self {
Self {
db,
cache: cache::StatementCache::new(10_000),
}
}
/// Primary recursive entrypoint that separates Array lists from Object branches
pub fn merge(&self, data: Value) -> Result<Value, String> {
let result = match data {
Value::Array(items) => self.merge_array(items)?,
Value::Object(map) => self.merge_object(map)?,
// Nulls, Strings, Bools, Numbers at root are invalid merge payloads
_ => return Err("Invalid merge payload: root must be an Object or Array".to_string()),
};
Ok(match result {
Value::Object(mut map) => {
let mut out = serde_json::Map::new();
if let Some(id) = map.remove("id") {
out.insert("id".to_string(), id);
}
Value::Object(out)
}
Value::Array(arr) => {
let mut out_arr = Vec::new();
for item in arr {
if let Value::Object(mut map) = item {
let mut out = serde_json::Map::new();
if let Some(id) = map.remove("id") {
out.insert("id".to_string(), id);
}
out_arr.push(Value::Object(out));
} else {
out_arr.push(Value::Null);
}
}
Value::Array(out_arr)
}
other => other,
})
}
/// Handles mapping over an array of entities, executing merge logic on each and returning the resolved list.
/// Short-circuits on the first item that fails to merge.
fn merge_array(&self, items: Vec<Value>) -> Result<Value, String> {
    items
        .into_iter()
        .map(|item| self.merge(item))
        .collect::<Result<Vec<_>, String>>()
        .map(Value::Array)
}
/// Core processing algorithm for a single Entity Object dictionary.
///
/// Relationship types stage their children before themselves; plain entity
/// types are staged first so children can reference the staged parent.
fn merge_object(&self, mut obj: serde_json::Map<String, Value>) -> Result<Value, String> {
    // Step 1: Ensure it has a `type` definition to proceed
    let type_name = obj
        .get("type")
        .and_then(|v| v.as_str())
        .ok_or_else(|| "Missing required 'type' field on object".to_string())?
        .to_string();
    // Step 2: Extract Type mapping from the Engine
    let type_def = self
        .db
        .types
        .get(&type_name)
        .ok_or_else(|| format!("Unknown entity type: {}", type_name))?;
    // Step 3 & 4: (Pre/Post Staging based on `relationship` flag)
    if type_def.relationship {
        // Relationships: process children FIRST (Post-Staging)
        self.process_children(&mut obj, type_def)?;
        Ok(Value::Object(self.stage_entity(obj)?))
    } else {
        // Entities: process core FIRST (Pre-Staging)
        let mut staged = self.stage_entity(obj)?;
        self.process_children(&mut staged, type_def)?;
        Ok(Value::Object(staged))
    }
}
/// Iterates values of `obj`, if they are structural (Array/Object), executes `self.merge()` on them.
/// Uses the `Database` registry to find FK relations and apply the IDs upstream/downstream appropriately.
///
/// Per child value: (1) the parent's `organization_id` is cascaded down when
/// missing; (2) if the child owns the FK, the parent's key values are copied
/// onto the child BEFORE recursing; (3) after the recursive merge, if the
/// parent owns the FK, the merged child's values are copied back up. For
/// arrays, only the first element is peeked for relation detection and only
/// the first merged element back-fills the parent.
fn process_children(
    &self,
    obj: &mut serde_json::Map<String, Value>,
    type_def: &crate::database::r#type::Type,
) -> Result<(), String> {
    let keys: Vec<String> = obj.keys().cloned().collect();
    for key in keys {
        // Temporarily extract value to process without borrowing Map mutably
        let val = match obj.remove(&key) {
            Some(v) => v,
            None => continue,
        };
        if val.is_object() || val.is_array() {
            // Pre-Process: Propagate parent data to children BEFORE recursing and applying relations
            let mut child_val = val;
            let mut relation_info = None;
            // Try to peek at the child type for relational mapping
            let peek_obj = match &child_val {
                Value::Object(m) => Some(m),
                Value::Array(arr) if !arr.is_empty() => arr[0].as_object(),
                _ => None,
            };
            if let Some(child_map) = peek_obj {
                if let Ok(Some(relation)) = self.get_entity_relation(obj, type_def, child_map, &key) {
                    let child_type_name = child_map.get("type").and_then(|v| v.as_str()).unwrap_or("");
                    if let Some(c_type) = self.db.types.get(child_type_name) {
                        // Determine which side of the relation owns the FK
                        // columns by checking each type's hierarchy.
                        let parent_is_source = type_def.hierarchy.contains(&relation.source_type);
                        let child_is_source = c_type.hierarchy.contains(&relation.source_type);
                        relation_info = Some((relation, parent_is_source, child_is_source));
                    }
                }
            }
            // Apply pre-merge mutations mapping IDs
            if let Some((relation, _parent_is_source, child_is_source)) = relation_info.as_ref() {
                match &mut child_val {
                    Value::Object(child_map) => {
                        // Cascade Organization ID
                        if !child_map.contains_key("organization_id") {
                            if let Some(org_id) = obj.get("organization_id") {
                                child_map.insert("organization_id".to_string(), org_id.clone());
                            }
                        }
                        // If child owns FK, parent provides it
                        if *child_is_source {
                            Self::apply_entity_relation(
                                child_map,
                                &relation.source_columns,
                                &relation.destination_columns,
                                obj,
                            );
                        }
                    }
                    Value::Array(items) => {
                        // Same cascade/FK propagation, applied per element.
                        for item in items.iter_mut() {
                            if let Value::Object(child_map) = item {
                                if !child_map.contains_key("organization_id") {
                                    if let Some(org_id) = obj.get("organization_id") {
                                        child_map.insert("organization_id".to_string(), org_id.clone());
                                    }
                                }
                                if *child_is_source {
                                    Self::apply_entity_relation(
                                        child_map,
                                        &relation.source_columns,
                                        &relation.destination_columns,
                                        obj,
                                    );
                                }
                            }
                        }
                    }
                    _ => {}
                }
            }
            // RECURSE: Merge the modified children
            let merged_val = self.merge(child_val)?;
            // Post-Process: Apply relations upwards if parent owns the FK
            if let Some((relation, parent_is_source, _child_is_source)) = relation_info {
                if parent_is_source {
                    match &merged_val {
                        Value::Object(merged_child_map) => {
                            Self::apply_entity_relation(
                                obj,
                                &relation.source_columns,
                                &relation.destination_columns,
                                merged_child_map,
                            );
                        }
                        // NOTE(review): only the FIRST array element back-fills
                        // the parent's FK — confirm this is intended for
                        // one-to-many children.
                        Value::Array(items) if !items.is_empty() => {
                            if let Value::Object(merged_child_map) = &items[0] {
                                Self::apply_entity_relation(
                                    obj,
                                    &relation.source_columns,
                                    &relation.destination_columns,
                                    merged_child_map,
                                );
                            }
                        }
                        _ => {}
                    }
                }
            }
            obj.insert(key, merged_val);
        } else {
            // Scalar values pass through untouched.
            obj.insert(key, val);
        }
    }
    Ok(())
}
/// Evaluates `lk_` structures, fetches existing rows via SPI, computes `compare_entities` diff,
/// executes UPDATE/INSERT SPI, and handles `agreego.change` auditing.
///
/// Precondition: `obj` carries a `type` key naming a registered `Type` — the
/// caller (`merge_object`) guarantees this, which is why the `unwrap()`s
/// below are safe. Returns `obj` augmented with system audit fields (and a
/// generated `id` on create); a no-op update returns `obj` unchanged.
fn stage_entity(
    &self,
    mut obj: serde_json::Map<String, Value>,
) -> Result<serde_json::Map<String, Value>, String> {
    let type_name = obj
        .get("type")
        .and_then(|v| v.as_str())
        .unwrap()
        .to_string();
    let type_def = self.db.types.get(&type_name).unwrap();
    // 1. Fetch Existing Entity
    let existing_entity = self.fetch_entity(&obj, type_def)?;
    // 2. Identify System Keys we don't want to diff
    let system_keys = vec![
        "id".to_string(),
        "type".to_string(),
        "organization_id".to_string(),
        "created_by".to_string(),
        "modified_by".to_string(),
        "created_at".to_string(),
        "modified_at".to_string(),
    ];
    // 3. Compare entities to find exact changes
    let changes = self.compare_entities(
        existing_entity.as_ref(),
        &obj,
        &type_def.fields,
        &system_keys,
    );
    // 4. If no changes and an entity existed, we skip
    let is_update = existing_entity.is_some();
    if is_update && changes.is_empty() {
        return Ok(obj);
    }
    // 5. Apply correct system fields
    let user_id = self.db.auth_user_id()?;
    let timestamp = self.db.timestamp()?;
    let entity_change_kind = if !is_update {
        // Create path: mint an id if the payload did not supply one.
        if !obj.contains_key("id") {
            use uuid::Uuid;
            obj.insert("id".to_string(), Value::String(Uuid::new_v4().to_string()));
        }
        obj.insert("created_by".to_string(), Value::String(user_id.clone()));
        obj.insert("created_at".to_string(), Value::String(timestamp.clone()));
        obj.insert("modified_by".to_string(), Value::String(user_id.clone()));
        obj.insert("modified_at".to_string(), Value::String(timestamp.clone()));
        "create"
    } else {
        obj.insert("modified_by".to_string(), Value::String(user_id.clone()));
        obj.insert("modified_at".to_string(), Value::String(timestamp.clone()));
        "update"
    };
    // 6. Execute SQL Merges
    self.merge_entity_fields(is_update, &type_name, type_def, &changes, &obj)?;
    // 7. Fire agreego.change
    // Build the "complete" post-merge entity: the payload overlaid on the
    // previously fetched row (create path uses the payload alone).
    let mut complete = obj.clone();
    if is_update {
        // overlay on top of existing for complete state
        if let Some(mut existing) = existing_entity {
            for (k, v) in &obj {
                existing.insert(k.clone(), v.clone());
            }
            complete = existing;
        }
    }
    let mut notification = serde_json::Map::new();
    notification.insert("complete".to_string(), Value::Object(complete.clone()));
    // Creates store the changes plus the type in the audit row; updates also
    // attach the changes to the notification payload.
    let changes_val = if !is_update {
        let mut c = changes.clone();
        c.insert("type".to_string(), Value::String(type_name.clone()));
        Value::Object(c)
    } else {
        notification.insert("changes".to_string(), Value::Object(changes.clone()));
        Value::Object(changes.clone())
    };
    // NOTE(review): values are inlined via quote_literal rather than bound
    // parameters — presumably quote_literal escapes safely; confirm.
    let change_sql = format!(
        "INSERT INTO agreego.change (changes, entity_id, id, kind, modified_at, modified_by) VALUES ({}, {}, {}, {}, {}, {})",
        Self::quote_literal(&changes_val),
        Self::quote_literal(obj.get("id").unwrap()),
        Self::quote_literal(&Value::String(uuid::Uuid::new_v4().to_string())),
        Self::quote_literal(&Value::String(entity_change_kind.to_string())),
        Self::quote_literal(&Value::String(timestamp.clone())),
        Self::quote_literal(&Value::String(user_id.clone()))
    );
    let notification_json = Value::Object(notification);
    let notify_sql = format!(
        "SELECT pg_notify('entity', {})",
        Self::quote_literal(&Value::String(notification_json.to_string()))
    );
    self
        .db
        .execute(&change_sql, None)
        .map_err(|e| format!("Executor Error in change: {:?}", e))?;
    self
        .db
        .execute(&notify_sql, None)
        .map_err(|e| format!("Executor Error in notify: {:?}", e))?;
    Ok(obj)
}
/// Exact replica of `agreego.compare_entities`. Takes a fetched `old` entity from the DB (if any),
/// the `new_fields` from the JSON payload, the `fields` defined on the `Type` hierarchy, and a list of `system_keys`.
/// Returns a clean JSON object containing ONLY the modified keys, or an empty map if No-Op.
fn compare_entities(
    &self,
    fetched_entity: Option<&serde_json::Map<String, Value>>,
    new_fields: &serde_json::Map<String, Value>,
    type_fields: &[String],
    system_keys: &[String],
) -> serde_json::Map<String, Value> {
    new_fields
        .iter()
        // Keep only keys that belong to the Type's field set and are not
        // system-managed audit keys.
        .filter(|&(key, _)| type_fields.contains(key) && !system_keys.contains(key))
        // With no prior entity every surviving field is a change; otherwise
        // keep only values that differ from the fetched row (absent == null).
        .filter(|&(key, new_val)| match fetched_entity {
            None => true,
            Some(old_map) => new_val != old_map.get(key).unwrap_or(&Value::Null),
        })
        .map(|(key, new_val)| (key.clone(), new_val.clone()))
        .collect()
}
/// Exact replica of `agreego.reduce_entity_relations`. Resolves Ambiguous Graph paths
/// down to a single distinct FK relationship path based on prefix rules.
///
/// Resolution order: (1) zero matches -> None; exactly one -> that relation;
/// (2) a relation whose `prefix` equals `relation_name` exactly wins;
/// (3) otherwise relations whose prefix appears as a key in the `relative`
/// payload are discarded and a unique survivor wins. Anything still
/// ambiguous is an `AMBIGUOUS_TYPE_RELATIONS` error.
fn reduce_entity_relations(
    &self,
    mut matching_relations: Vec<crate::database::relation::Relation>,
    relative: &serde_json::Map<String, Value>,
    relation_name: &str,
) -> Result<Option<crate::database::relation::Relation>, String> {
    // 0 or 1 relations is an immediate fast-path resolution
    if matching_relations.is_empty() {
        return Ok(None);
    }
    if matching_relations.len() == 1 {
        return Ok(Some(matching_relations.pop().unwrap()));
    }
    // Step 1: Check for exact prefix match with the relation_name pointer
    let exact_match: Vec<_> = matching_relations
        .iter()
        .filter(|r| r.prefix.as_deref() == Some(relation_name))
        .cloned()
        .collect();
    if exact_match.len() == 1 {
        return Ok(Some(exact_match.into_iter().next().unwrap()));
    }
    // Step 2: Inverse filter, mirroring the SQL
    // `WHERE NOT EXISTS (select mr.prefix where relative ? mr.prefix)`:
    // keep a relation only if its prefix is NOT present as a key in the
    // `relative` payload; prefix-less relations are always kept.
    matching_relations.retain(|r| {
        if let Some(prefix) = &r.prefix {
            !relative.contains_key(prefix)
        } else {
            true // No prefix means we keep it by default
        }
    });
    if matching_relations.len() == 1 {
        Ok(Some(matching_relations.pop().unwrap()))
    } else {
        // Still 0 or >1 candidates: surface every remaining constraint name.
        let constraints: Vec<_> = matching_relations
            .iter()
            .map(|r| r.constraint.clone())
            .collect();
        Err(format!(
            "AMBIGUOUS_TYPE_RELATIONS: Could not reduce ambiguous type relations: {}",
            constraints.join(", ")
        ))
    }
}
/// Exact replica of `agreego.get_entity_relation`. Given two entities (`entity` and `relative`) and the JSON key linking them,
/// it searches the Database graphs for a concrete FK constraint.
///
/// Direct relations (neither end polymorphic) are tried first; polymorphic
/// relations (destination "entity") are only considered when no direct one
/// resolves. Returns Ok(None) when the relative's type is unknown or nothing
/// matches; ambiguity errors bubble up from `reduce_entity_relations`.
fn get_entity_relation(
    &self,
    _entity: &serde_json::Map<String, Value>,
    entity_type: &crate::database::r#type::Type,
    relative: &serde_json::Map<String, Value>,
    relation_name: &str,
) -> Result<Option<crate::database::relation::Relation>, String> {
    let relative_type_name = relative.get("type").and_then(|v| v.as_str()).unwrap_or("");
    let relative_type = match self.db.types.get(relative_type_name) {
        Some(t) => t,
        None => return Ok(None),
    };
    let mut relative_relations: Vec<crate::database::relation::Relation> = Vec::new();
    // 1. Look for direct relationships first
    for r in self.db.relations.values() {
        if r.source_type != "entity" && r.destination_type != "entity" {
            // Either side may own the FK; hierarchies are consulted so
            // subtype relations also match.
            let condition1 = relative_type.hierarchy.contains(&r.source_type)
                && entity_type.hierarchy.contains(&r.destination_type);
            let condition2 = entity_type.hierarchy.contains(&r.source_type)
                && relative_type.hierarchy.contains(&r.destination_type);
            if condition1 || condition2 {
                relative_relations.push(r.clone());
            }
        }
    }
    let mut relative_relation =
        self.reduce_entity_relations(relative_relations, relative, relation_name)?;
    // 2. Look for polymorphic relationships if no direct relationship is found
    if relative_relation.is_none() {
        let mut poly_relations: Vec<crate::database::relation::Relation> = Vec::new();
        for r in self.db.relations.values() {
            if r.destination_type == "entity" {
                let condition1 = relative_type.hierarchy.contains(&r.source_type);
                let condition2 = entity_type.hierarchy.contains(&r.source_type);
                if condition1 || condition2 {
                    poly_relations.push(r.clone());
                }
            }
        }
        relative_relation = self.reduce_entity_relations(poly_relations, relative, relation_name)?;
    }
    Ok(relative_relation)
}
/// Exact replica of `agreego.apply_entity_relation`. Syncs FK column values from the destination to the source.
///
/// Copies each `destination_columns[i]` value on `destination_entity` into
/// `source_columns[i]` on `source_entity`; missing destination values are
/// written as null.
fn apply_entity_relation(
    source_entity: &mut serde_json::Map<String, Value>,
    source_columns: &[String],
    destination_columns: &[String],
    destination_entity: &serde_json::Map<String, Value>,
) {
    // In theory, validation should prevent this, but fail gracefully/ignore
    // if lengths diverge (the guard also makes the zip below total: it never
    // silently truncates to the shorter slice).
    if source_columns.len() != destination_columns.len() {
        return;
    }
    // Idiomatic paired iteration instead of an index loop.
    for (src_col, dest_col) in source_columns.iter().zip(destination_columns) {
        let dest_val = destination_entity
            .get(dest_col)
            .cloned()
            .unwrap_or(Value::Null);
        source_entity.insert(src_col.clone(), dest_val);
    }
}
/// Exact replica of `agreego.fetch_entity`. Dynamically constructs a `SELECT to_jsonb(t1.*) || to_jsonb(t2.*)`
/// based on the Type hierarchy and available `id` or `lookup_fields` presence.
///
/// Returns the single matching row as a JSON map, None when nothing matches
/// or no usable lookup condition exists, and an error when the lookup is
/// ambiguous (more than one row).
fn fetch_entity(
    &self,
    entity_fields: &serde_json::Map<String, Value>,
    entity_type: &crate::database::r#type::Type,
) -> Result<Option<serde_json::Map<String, Value>>, String> {
    let id_val = entity_fields.get("id");
    let entity_type_name = entity_type.name.as_str();
    // Check if all required lookup keys are PRESENT (value can be anything, including NULL)
    let lookup_complete = if entity_type.lookup_fields.is_empty() {
        false
    } else {
        entity_type
            .lookup_fields
            .iter()
            .all(|f| entity_fields.contains_key(f))
    };
    if id_val.is_none() && !lookup_complete {
        return Ok(None);
    }
    // Build or Retrieve Cached Select/Join clauses. The template is cached
    // per type name and is WHERE-less so it can be reused across lookups.
    let fetch_sql_template = if let Some(cached) = self.cache.get(entity_type_name) {
        cached
    } else {
        let mut select_list = String::from("to_jsonb(t1.*)");
        let mut join_clauses = format!("FROM agreego.\"{}\" t1", entity_type.hierarchy[0]);
        // Each ancestor table in the hierarchy joins on a shared `id` and
        // overlays its columns via `||` onto the JSONB result.
        for (i, table_name) in entity_type.hierarchy.iter().enumerate().skip(1) {
            let t_alias = format!("t{}", i + 1);
            join_clauses.push_str(&format!(
                " LEFT JOIN agreego.\"{}\" {} ON {}.id = t1.id",
                table_name, t_alias, t_alias
            ));
            select_list.push_str(&format!(" || to_jsonb({}.*)", t_alias));
        }
        let template = format!("SELECT {} {}", select_list, join_clauses);
        self
            .cache
            .insert(entity_type_name.to_string(), template.clone());
        template
    };
    // Build WHERE Clauses
    let mut id_condition = None;
    if let Some(id) = id_val {
        id_condition = Some(format!("t1.id = {}", Self::quote_literal(id)));
    }
    let mut lookup_condition = None;
    if lookup_complete {
        let mut lookup_predicates = Vec::new();
        for column in &entity_type.lookup_fields {
            let val = entity_fields.get(column).unwrap_or(&Value::Null);
            if column == "type" {
                lookup_predicates.push(format!("t1.\"{}\" = {}", column, Self::quote_literal(val)));
            } else {
                // Empty-string or null lookup values translate to IS NULL.
                if val.as_str() == Some("") || val.is_null() {
                    lookup_predicates.push(format!("\"{}\" IS NULL", column));
                } else {
                    lookup_predicates.push(format!("\"{}\" = {}", column, Self::quote_literal(val)));
                }
            }
        }
        lookup_condition = Some(lookup_predicates.join(" AND "));
    }
    // Determine final WHERE clause based on available conditions
    let where_clause = match (id_condition, lookup_condition) {
        (Some(id_cond), Some(lookup_cond)) => format!("WHERE ({}) OR ({})", id_cond, lookup_cond),
        (Some(id_cond), None) => format!("WHERE {}", id_cond),
        (None, Some(lookup_cond)) => format!("WHERE {}", lookup_cond),
        (None, None) => return Ok(None),
    };
    // Construct Final Query
    let fetch_sql = format!("{} {}", fetch_sql_template, where_clause);
    // Execute and Return Result via Database Executor
    let fetched = match self.db.query(&fetch_sql, None) {
        Ok(Value::Array(table)) => {
            if table.len() > 1 {
                // Lookups must resolve to at most one row.
                Err(format!(
                    "TOO_MANY_LOOKUP_ROWS: Lookup for {} found too many existing rows",
                    entity_type_name
                ))
            } else if table.is_empty() {
                Ok(None)
            } else {
                let row = table.first().unwrap();
                match row {
                    Value::Object(map) => Ok(Some(map.clone())),
                    other => Err(format!(
                        "Expected fetch_entity to return JSON object, got: {:?}",
                        other
                    )),
                }
            }
        }
        Ok(other) => Err(format!(
            "Expected array from query in fetch_entity, got: {:?}",
            other
        )),
        Err(e) => Err(format!("SPI error in fetch_entity: {:?}", e)),
    }?;
    Ok(fetched)
}
/// Exact replica of `agreego.merge_entity_fields`. Issues an INSERT or UPDATE per table
/// in the Type's hierarchy, filtering out keys that don't belong to the specific table block.
///
/// * `is_update` — when true, emits UPDATEs restricted to keys in `changes`;
///   when false, emits INSERTs covering every provided field for each table.
/// * `changes` — the delta being applied; `full_entity` — the merged entity
///   state, which must contain a string `id`.
///
/// Returns an error when `id` or the type's `grouped_fields` metadata is
/// missing, or when the underlying executor fails.
fn merge_entity_fields(
    &self,
    is_update: bool,
    entity_type_name: &str,
    entity_type: &crate::database::r#type::Type,
    changes: &serde_json::Map<String, Value>,
    full_entity: &serde_json::Map<String, Value>,
) -> Result<(), String> {
    let id_str = match full_entity.get("id").and_then(|v| v.as_str()) {
        Some(id) => id,
        None => return Err("Missing 'id' for merge execution".to_string()),
    };
    let grouped_fields = match &entity_type.grouped_fields {
        Some(Value::Object(map)) => map,
        _ => {
            return Err(format!(
                "Grouped fields missing for type {}",
                entity_type_name
            ));
        }
    };
    for table_name in &entity_type.hierarchy {
        // Columns owned by this specific table (from grouped_fields); tables
        // with no declared fields are skipped entirely.
        let table_fields = match grouped_fields.get(table_name).and_then(|v| v.as_array()) {
            Some(arr) => arr
                .iter()
                .filter_map(|v| v.as_str().map(|s| s.to_string()))
                .collect::<Vec<_>>(),
            None => continue,
        };
        // For inserts we want all provided fields. For updates we only want changes.
        let mut my_changes = Vec::new();
        for field in &table_fields {
            if changes.contains_key(field) || (!is_update && full_entity.contains_key(field)) {
                my_changes.push(field.clone());
            }
        }
        if is_update {
            if my_changes.is_empty() {
                continue;
            }
            let mut set_clauses = Vec::new();
            for field in &my_changes {
                // Safe: when is_update, my_changes only holds keys present in `changes`.
                let val = changes.get(field).unwrap();
                set_clauses.push(format!("\"{}\" = {}", field, Self::quote_literal(val)));
            }
            let sql = format!(
                "UPDATE agreego.\"{}\" SET {} WHERE id = {}",
                table_name,
                set_clauses.join(", "),
                Self::quote_literal(&Value::String(id_str.to_string()))
            );
            self
                .db
                .execute(&sql, None)
                .map_err(|e| format!("SPI Error in UPDATE: {:?}", e))?;
        } else {
            // INSERT
            let mut columns = Vec::new();
            let mut values = Vec::new();
            for field in &my_changes {
                columns.push(format!("\"{}\"", field));
                // BUG FIX: a field can qualify for my_changes via `changes` while
                // being absent from `full_entity`; the previous `unwrap()` on
                // `full_entity.get(field)` panicked in that case. Prefer the
                // merged entity's value, fall back to the change, then NULL.
                let val = full_entity
                    .get(field)
                    .or_else(|| changes.get(field))
                    .unwrap_or(&Value::Null);
                values.push(Self::quote_literal(val));
            }
            // Ensure 'id' and 'type' are present if required by this specific table schema chunk
            if columns.iter().all(|c| c != "\"id\"") && table_fields.iter().any(|f| f == "id") {
                columns.push("\"id\"".to_string());
                values.push(Self::quote_literal(&Value::String(id_str.to_string())));
            }
            if columns.iter().all(|c| c != "\"type\"") && table_fields.iter().any(|f| f == "type") {
                columns.push("\"type\"".to_string());
                values.push(Self::quote_literal(&Value::String(
                    entity_type_name.to_string(),
                )));
            }
            if columns.is_empty() {
                continue;
            }
            let sql = format!(
                "INSERT INTO agreego.\"{}\" ({}) VALUES ({})",
                table_name,
                columns.join(", "),
                values.join(", ")
            );
            self
                .db
                .execute(&sql, None)
                .map_err(|e| format!("SPI Error in INSERT: {:?}", e))?;
        }
    }
    Ok(())
}
/// Helper to emulate Postgres `quote_literal`.
///
/// NULL, booleans, and numbers render bare; strings and compound JSON values
/// render single-quoted with embedded quotes doubled (`'` -> `''`).
fn quote_literal(val: &Value) -> String {
    match val {
        Value::Null => String::from("NULL"),
        Value::Bool(flag) => flag.to_string(),
        Value::Number(num) => num.to_string(),
        Value::String(text) => format!("'{}'", text.replace('\'', "''")),
        // Arrays/objects are serialized to their JSON text and quoted like a string.
        other => {
            let encoded = serde_json::to_string(other).unwrap();
            format!("'{}'", encoded.replace('\'', "''"))
        }
    }
}
}

369
src/queryer/compiler.rs Normal file
View File

@ -0,0 +1,369 @@
use crate::database::Database;
use std::sync::Arc;
/// Compiles JSON Schemas from the database catalog into nested PostgreSQL
/// queries that return JSONB. Holds a shared handle to the catalog so `$ref`
/// and entity-type lookups can be resolved during compilation.
pub struct SqlCompiler {
    /// Shared database catalog (schemas, types) used during compilation.
    pub db: Arc<Database>,
}
impl SqlCompiler {
    /// Creates a compiler bound to the shared database catalog.
    pub fn new(db: Arc<Database>) -> Self {
        Self { db }
    }
    /// Compiles a JSON schema into a nested PostgreSQL query returning JSONB
    ///
    /// * `schema_id` — key into the database's schema registry.
    /// * `stem_path` — optional `/`-separated path descending into the schema;
    ///   `None`, `""`, and `"/"` all mean "compile the root schema".
    /// * `filter_keys` — top-level property names that become `$1..$n`
    ///   parameter placeholders in the generated SQL (order must match the
    ///   argument order the caller binds at execution time).
    pub fn compile(
        &self,
        schema_id: &str,
        stem_path: Option<&str>,
        filter_keys: &[String],
    ) -> Result<String, String> {
        let schema = self
            .db
            .schemas
            .get(schema_id)
            .ok_or_else(|| format!("Schema not found: {}", schema_id))?;
        // Descend into the stem path when a non-trivial one is given.
        let target_schema = if let Some(path) = stem_path.filter(|p| !p.is_empty() && *p != "/") {
            self.resolve_stem(schema, path)?
        } else {
            schema
        };
        // 1. We expect the top level to typically be an Object or Array
        let (sql, _) = self.walk_schema(target_schema, "t1", None, filter_keys)?;
        Ok(sql)
    }
fn resolve_stem<'a>(
&'a self,
mut schema: &'a crate::database::schema::Schema,
path: &str,
) -> Result<&'a crate::database::schema::Schema, String> {
let parts: Vec<&str> = path.trim_start_matches('/').split('/').collect();
for part in parts {
let mut current = schema;
let mut depth = 0;
while let Some(r) = &current.obj.r#ref {
if let Some(s) = self.db.schemas.get(r) {
current = s;
} else {
break;
}
depth += 1;
if depth > 20 {
break;
}
}
if current.obj.properties.is_none() && current.obj.items.is_some() {
if let Some(items) = &current.obj.items {
current = items;
let mut depth2 = 0;
while let Some(r) = &current.obj.r#ref {
if let Some(s) = self.db.schemas.get(r) {
current = s;
} else {
break;
}
depth2 += 1;
if depth2 > 20 {
break;
}
}
}
}
if let Some(props) = &current.obj.properties {
if let Some(next_schema) = props.get(part) {
schema = next_schema;
} else {
return Err(format!("Stem part '{}' not found in schema", part));
}
} else {
return Err(format!(
"Cannot resolve stem part '{}': not an object",
part
));
}
}
let mut current = schema;
let mut depth = 0;
while let Some(r) = &current.obj.r#ref {
if let Some(s) = self.db.schemas.get(r) {
current = s;
} else {
break;
}
depth += 1;
if depth > 20 {
break;
}
}
Ok(current)
}
    /// Recursively walks the schema AST emitting native PostgreSQL jsonb mapping
    /// Returns a tuple of (SQL_String, Field_Type)
    ///
    /// * `parent_alias` — table alias of the enclosing entity block the
    ///   emitted expression should read columns from.
    /// * `prop_name_context` — the property name this node is compiled for,
    ///   used for column references in the literal fallback.
    /// * `filter_keys` — passed through so entity nodes can emit `$n` filters.
    fn walk_schema(
        &self,
        schema: &crate::database::schema::Schema,
        parent_alias: &str,
        prop_name_context: Option<&str>,
        filter_keys: &[String],
    ) -> Result<(String, String), String> {
        // Determine the base schema type (could be an array, object, or literal)
        match &schema.obj.type_ {
            Some(crate::database::schema::SchemaTypeOrArray::Single(t)) if t == "array" => {
                // Handle Arrays:
                if let Some(items) = &schema.obj.items {
                    // Array of entities ($ref to a registered Type): compile a
                    // full entity subquery with jsonb_agg semantics.
                    if let Some(ref_id) = &items.obj.r#ref {
                        if let Some(type_def) = self.db.types.get(ref_id) {
                            return self.compile_entity_node(
                                items,
                                type_def,
                                parent_alias,
                                prop_name_context,
                                true,
                                filter_keys,
                            );
                        }
                    }
                    // NOTE(review): non-entity array items emit a placeholder
                    // `FROM TODO` — this branch is visibly unfinished.
                    let (item_sql, _) =
                        self.walk_schema(items, parent_alias, prop_name_context, filter_keys)?;
                    return Ok((
                        format!("(SELECT jsonb_agg({}) FROM TODO)", item_sql),
                        "array".to_string(),
                    ));
                }
                // NOTE(review): arrays without `items` also emit placeholder SQL.
                Ok((
                    "SELECT jsonb_agg(TODO) FROM TODO".to_string(),
                    "array".to_string(),
                ))
            }
            _ => {
                // Handle Objects & Direct Refs
                if let Some(ref_id) = &schema.obj.r#ref {
                    // If it's a $ref, check if it points to an Entity Type
                    if let Some(type_def) = self.db.types.get(ref_id) {
                        return self.compile_entity_node(
                            schema,
                            type_def,
                            parent_alias,
                            prop_name_context,
                            false,
                            filter_keys,
                        );
                    }
                    // If it's just an ad-hoc struct ref, we should resolve it
                    if let Some(target_schema) = self.db.schemas.get(ref_id) {
                        return self.walk_schema(target_schema, parent_alias, prop_name_context, filter_keys);
                    }
                    return Err(format!("Unresolved $ref: {}", ref_id));
                }
                // Just an inline object definition?
                if let Some(props) = &schema.obj.properties {
                    return self.compile_inline_object(props, parent_alias, filter_keys);
                }
                // Literal fallback: a plain column reference on the parent alias.
                Ok((
                    format!(
                        "{}.{}",
                        parent_alias,
                        prop_name_context.unwrap_or("unknown_prop")
                    ),
                    "string".to_string(),
                ))
            }
        }
    }
    /// Compiles a subquery block for an Entity-typed schema node.
    ///
    /// Builds the table-hierarchy FROM/JOIN chain with aliases unique to this
    /// node's path, maps each schema property to the owning table via the
    /// type's `grouped_fields`, appends root-level `$n` filter predicates, and
    /// wraps the result in `jsonb_build_object` (aggregated with `jsonb_agg`
    /// when `is_array`).
    ///
    /// Returns `(sql, "array"|"object")`.
    fn compile_entity_node(
        &self,
        schema: &crate::database::schema::Schema,
        type_def: &crate::database::r#type::Type,
        parent_alias: &str,
        prop_name: Option<&str>,
        is_array: bool,
        filter_keys: &[String],
    ) -> Result<(String, String), String> {
        // We are compiling a query block for an Entity.
        let mut select_args = Vec::new();
        // Mapping table hierarchy to aliases, e.g., ["person", "user", "organization", "entity"]
        let local_ctx = format!("{}_{}", parent_alias, prop_name.unwrap_or("obj"));
        // e.g., parent_t1_contact -> we'll use t1 for the first of this block, t2 for the second, etc.
        // Actually, local_ctx can just be exactly that prop's unique path.
        let mut table_aliases = std::collections::HashMap::new();
        let mut from_clauses = Vec::new();
        for (i, table_name) in type_def.hierarchy.iter().enumerate() {
            let alias = format!("{}_t{}", local_ctx, i + 1);
            table_aliases.insert(table_name.clone(), alias.clone());
            if i == 0 {
                from_clauses.push(format!("agreego.{} {}", table_name, alias));
            } else {
                // Join to previous
                let prev_alias = format!("{}_t{}", local_ctx, i);
                from_clauses.push(format!(
                    "JOIN agreego.{} {} ON {}.id = {}.id",
                    table_name, alias, alias, prev_alias
                ));
            }
        }
        // Now, let's map properties from the schema to the correct table alias using grouped_fields
        // grouped_fields is { "person": ["first_name", ...], "user": ["password"], ... }
        let grouped_fields = type_def.grouped_fields.as_ref().and_then(|v| v.as_object());
        if let Some(props) = &schema.obj.properties {
            for (prop_key, prop_schema) in props {
                // Find which table owns this property; default to the `entity`
                // base table's alias when ownership can't be determined.
                let mut owner_alias = table_aliases
                    .get("entity")
                    .cloned()
                    .unwrap_or_else(|| format!("{}_t_err", parent_alias));
                if let Some(gf) = grouped_fields {
                    for (t_name, fields_val) in gf {
                        if let Some(fields_arr) = fields_val.as_array() {
                            if fields_arr.iter().any(|v| v.as_str() == Some(prop_key)) {
                                owner_alias = table_aliases
                                    .get(t_name)
                                    .cloned()
                                    .unwrap_or_else(|| parent_alias.to_string());
                                break;
                            }
                        }
                    }
                }
                // Now we know `owner_alias`, e.g., `parent_t1` or `parent_t3`.
                // Walk the property to get its SQL value
                let (val_sql, _) =
                    self.walk_schema(prop_schema, &owner_alias, Some(prop_key), filter_keys)?;
                // NOTE(review): prop_key is interpolated into the SQL string;
                // safe only while property names come from trusted schemas.
                select_args.push(format!("'{}', {}", prop_key, val_sql));
            }
        }
        let jsonb_obj_sql = if select_args.is_empty() {
            "jsonb_build_object()".to_string()
        } else {
            format!("jsonb_build_object({})", select_args.join(", "))
        };
        let base_alias = table_aliases
            .get(&type_def.name)
            .cloned()
            .unwrap_or_else(|| "err".to_string());
        let mut where_clauses = Vec::new();
        // Soft-delete filter: archived rows are always excluded.
        where_clauses.push(format!("NOT {}.archived", base_alias));
        // Filter Mapping - Only append filters if this is the ROOT table query (i.e. parent_alias is "t1")
        // Because cue.filters operates strictly on top-level root properties right now.
        if parent_alias == "t1" && prop_name.is_none() {
            for (i, filter_key) in filter_keys.iter().enumerate() {
                // Find which table owns this filter key
                let mut filter_alias = base_alias.clone(); // default to root table (e.g. t3 entity)
                if let Some(gf) = type_def.grouped_fields.as_ref().and_then(|v| v.as_object()) {
                    for (t_name, fields_val) in gf {
                        if let Some(fields_arr) = fields_val.as_array() {
                            if fields_arr.iter().any(|v| v.as_str() == Some(filter_key)) {
                                filter_alias = table_aliases
                                    .get(t_name)
                                    .cloned()
                                    .unwrap_or_else(|| base_alias.clone());
                                break;
                            }
                        }
                    }
                }
                let mut is_ilike = false;
                let mut cast = "";
                // Check schema for filter_key to determine datatype operation:
                // plain strings get ILIKE, uuids/timestamps/bools/numbers get
                // an equality match with an explicit cast from the JSONB arg.
                if let Some(props) = &schema.obj.properties {
                    if let Some(ps) = props.get(filter_key) {
                        let is_enum = ps.obj.enum_.is_some();
                        if let Some(crate::database::schema::SchemaTypeOrArray::Single(t)) = &ps.obj.type_ {
                            if t == "string" {
                                if ps.obj.format.as_deref() == Some("uuid") {
                                    cast = "::uuid";
                                } else if ps.obj.format.as_deref() == Some("date-time") {
                                    cast = "::timestamptz";
                                } else if !is_enum {
                                    is_ilike = true;
                                }
                            } else if t == "boolean" {
                                cast = "::boolean";
                            } else if t == "integer" || t == "number" {
                                cast = "::numeric";
                            }
                        }
                    }
                }
                // Add to WHERE clause using 1-indexed args pointer: $1, $2.
                // `$n#>>'{}'` extracts the JSONB argument as text; position n
                // must match the caller's sorted filter-key argument order.
                if is_ilike {
                    let param = format!("${}#>>'{{}}'", i + 1);
                    where_clauses.push(format!("{}.{} ILIKE {}", filter_alias, filter_key, param));
                } else {
                    let param = format!("(${}#>>'{{}}'){}", i + 1, cast);
                    where_clauses.push(format!("{}.{} = {}", filter_alias, filter_key, param));
                }
            }
        }
        // Resolve FK relationship constraint if this is a nested subquery
        if let Some(_prop) = prop_name {
            // MOCK relation resolution (will integrate with `get_entity_relation` properly)
            // By default assume FK is parent_id on child
            where_clauses.push(format!("{}.parent_id = {}.id", base_alias, parent_alias));
        }
        // Wrap the object in the final array or object SELECT
        let selection = if is_array {
            format!("COALESCE(jsonb_agg({}), '[]'::jsonb)", jsonb_obj_sql)
        } else {
            jsonb_obj_sql
        };
        let full_sql = format!(
            "(SELECT {} FROM {} WHERE {})",
            selection,
            from_clauses.join(" "),
            where_clauses.join(" AND ")
        );
        Ok((
            full_sql,
            if is_array {
                "array".to_string()
            } else {
                "object".to_string()
            },
        ))
    }
fn compile_inline_object(
&self,
props: &std::collections::BTreeMap<String, std::sync::Arc<crate::database::schema::Schema>>,
parent_alias: &str,
filter_keys: &[String],
) -> Result<(String, String), String> {
let mut build_args = Vec::new();
for (k, v) in props {
let (child_sql, _) = self.walk_schema(v, parent_alias, Some(k), filter_keys)?;
build_args.push(format!("'{}', {}", k, child_sql));
}
let combined = format!("jsonb_build_object({})", build_args.join(", "));
Ok((combined, "object".to_string()))
}
}

View File

@ -1,15 +1,83 @@
pub struct Queryer {
// To be implemented
}
use crate::database::Database;
use std::sync::Arc;
impl Default for Queryer {
fn default() -> Self {
Self::new()
}
pub mod compiler;
use dashmap::DashMap;
/// Schema-driven query engine: compiles JSON Schemas into SQL via
/// `compiler::SqlCompiler` and caches the generated statements per
/// (schema, stem, filter-key) permutation.
pub struct Queryer {
    /// Shared database catalog and executor.
    pub db: Arc<Database>,
    /// Compiled-SQL cache keyed by "schema(Stem:path):filter,keys".
    cache: DashMap<String, String>,
}
impl Queryer {
pub fn new() -> Self {
Self {}
    /// Builds a queryer over the shared database handle with an empty SQL cache.
    pub fn new(db: Arc<Database>) -> Self {
        Self {
            db,
            cache: DashMap::new(),
        }
    }
    /// Entrypoint to execute a dynamically compiled query based on a schema
    ///
    /// * `schema_id` — registry key of the schema to compile.
    /// * `stem_opt` — optional `/`-path descending into the schema.
    /// * `filters` — optional JSON object; its keys select `$n` predicates in
    ///   the compiled SQL and its values are bound as JSONB arguments.
    ///
    /// Returns the single JSONB value produced by the query, or JSON null when
    /// the query yields no rows.
    pub fn query(
        &self,
        schema_id: &str,
        stem_opt: Option<&str>,
        filters: Option<&serde_json::Value>,
    ) -> Result<serde_json::Value, String> {
        let filters_map: Option<&serde_json::Map<String, serde_json::Value>> =
            filters.and_then(|f| f.as_object());
        // Generate Permutation Cache Key: schema_id + sorted filter keys
        let mut filter_keys: Vec<String> = Vec::new();
        if let Some(fm) = filters_map {
            for key in fm.keys() {
                filter_keys.push(key.clone());
            }
        }
        // Sorting fixes the key order so the cache key — and the $1..$n
        // parameter positions in the compiled SQL — are stable across calls.
        filter_keys.sort();
        let stem_key = stem_opt.unwrap_or("/");
        let cache_key = format!("{}(Stem:{}):{}", schema_id, stem_key, filter_keys.join(","));
        let sql = if let Some(cached_sql) = self.cache.get(&cache_key) {
            cached_sql.value().clone()
        } else {
            // Compile the massive base SQL string
            let compiler = compiler::SqlCompiler::new(self.db.clone());
            let compiled_sql = compiler.compile(schema_id, stem_opt, &filter_keys)?;
            self.cache.insert(cache_key.clone(), compiled_sql.clone());
            compiled_sql
        };
        // 2. Prepare the execution arguments from the filters, in the same
        // sorted order used to compile the $n placeholders.
        let mut args: Vec<serde_json::Value> = Vec::new();
        if let Some(fm) = filters_map {
            for (_i, key) in filter_keys.iter().enumerate() {
                if let Some(val) = fm.get(key) {
                    args.push(val.clone());
                }
            }
        }
        // 3. Execute via Database Executor
        let fetched = match self.db.query(&sql, Some(&args)) {
            Ok(serde_json::Value::Array(table)) => {
                if table.is_empty() {
                    Ok(serde_json::Value::Null)
                } else {
                    // We expect the query to return a single JSONB column, already unpacked from row[0]
                    Ok(table.first().unwrap().clone())
                }
            }
            Ok(other) => Err(format!(
                "Expected array from generic query, got: {:?}",
                other
            )),
            Err(e) => Err(format!("SPI error in queryer: {}", e)),
        }?;
        Ok(fetched)
    }
}

View File

@ -10,12 +10,48 @@ struct TestSuite {
}
#[derive(Debug, Deserialize)]
struct TestCase {
description: String,
data: serde_json::Value,
valid: bool,
// Support explicit schema ID target for test case
schema_id: String,
pub struct TestCase {
pub description: String,
#[serde(default = "default_action")]
pub action: String, // "validate", "merge", or "query"
// For Validate & Query
#[serde(default)]
pub schema_id: String,
// For Query
#[serde(default)]
pub stem: Option<String>,
#[serde(default)]
pub filters: Option<serde_json::Value>,
// For Merge & Validate
#[serde(default)]
pub data: Option<serde_json::Value>,
// For Merge & Query mocks
#[serde(default)]
pub mocks: Option<serde_json::Value>,
pub expect: Option<ExpectBlock>,
// Legacy support for older tests to avoid migrating them all instantly
pub valid: Option<bool>,
pub expect_errors: Option<Vec<serde_json::Value>>,
}
/// Serde default for `TestCase::action`: test cases validate unless they say otherwise.
fn default_action() -> String {
    String::from("validate")
}
/// Expected outcome block for a test case (validate, merge, or query actions).
#[derive(Debug, Deserialize)]
pub struct ExpectBlock {
    /// Whether the action is expected to succeed.
    pub success: bool,
    /// Expected JSON result payload, if the test asserts one.
    pub result: Option<serde_json::Value>,
    /// Expected error objects, if the test asserts failures.
    pub errors: Option<Vec<serde_json::Value>>,
    /// Substrings expected to appear in the generated SQL (query tests).
    pub sql_patterns: Option<Vec<String>>,
}
// use crate::validator::registry::REGISTRY; // No longer used directly for tests!