query test progress

This commit is contained in:
2026-03-10 18:25:29 -04:00
parent bb263190f6
commit 1c08a8f2b8
20 changed files with 1949 additions and 225 deletions

1
.gitignore vendored
View File

@ -2,3 +2,4 @@
/package /package
.env .env
/src/tests.rs /src/tests.rs
/pgrx-develop

82
Cargo.lock generated
View File

@ -347,6 +347,30 @@ dependencies = [
"libc", "libc",
] ]
[[package]]
name = "crossbeam-channel"
version = "0.5.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
[[package]] [[package]]
name = "crypto-common" name = "crypto-common"
version = "0.1.7" version = "0.1.7"
@ -357,6 +381,20 @@ dependencies = [
"typenum", "typenum",
] ]
[[package]]
name = "dashmap"
version = "6.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf"
dependencies = [
"cfg-if",
"crossbeam-utils",
"hashbrown 0.14.5",
"lock_api",
"once_cell",
"parking_lot_core",
]
[[package]] [[package]]
name = "digest" name = "digest"
version = "0.10.7" version = "0.10.7"
@ -589,6 +627,12 @@ version = "1.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403" checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403"
[[package]]
name = "hashbrown"
version = "0.14.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"
[[package]] [[package]]
name = "hashbrown" name = "hashbrown"
version = "0.15.5" version = "0.15.5"
@ -815,11 +859,13 @@ version = "0.1.0"
dependencies = [ dependencies = [
"ahash", "ahash",
"chrono", "chrono",
"dashmap",
"fluent-uri", "fluent-uri",
"idna", "idna",
"indexmap", "indexmap",
"json-pointer", "json-pointer",
"lazy_static", "lazy_static",
"moka",
"once_cell", "once_cell",
"percent-encoding", "percent-encoding",
"pgrx", "pgrx",
@ -830,6 +876,7 @@ dependencies = [
"serde_json", "serde_json",
"url", "url",
"uuid", "uuid",
"xxhash-rust",
] ]
[[package]] [[package]]
@ -930,6 +977,23 @@ dependencies = [
"windows-sys 0.61.2", "windows-sys 0.61.2",
] ]
[[package]]
name = "moka"
version = "0.12.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85f8024e1c8e71c778968af91d43700ce1d11b219d127d79fb2934153b82b42b"
dependencies = [
"crossbeam-channel",
"crossbeam-epoch",
"crossbeam-utils",
"equivalent",
"parking_lot",
"portable-atomic",
"smallvec",
"tagptr",
"uuid",
]
[[package]] [[package]]
name = "nom" name = "nom"
version = "7.1.3" version = "7.1.3"
@ -1208,6 +1272,12 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "portable-atomic"
version = "1.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49"
[[package]] [[package]]
name = "postgres" name = "postgres"
version = "0.19.12" version = "0.19.12"
@ -1666,6 +1736,12 @@ dependencies = [
"windows", "windows",
] ]
[[package]]
name = "tagptr"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417"
[[package]] [[package]]
name = "tap" name = "tap"
version = "1.0.1" version = "1.0.1"
@ -2503,6 +2579,12 @@ dependencies = [
"tap", "tap",
] ]
[[package]]
name = "xxhash-rust"
version = "0.8.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fdd20c5420375476fbd4394763288da7eb0cc0b8c11deed431a91562af7335d3"
[[package]] [[package]]
name = "yoke" name = "yoke"
version = "0.8.1" version = "0.8.1"

View File

@ -20,6 +20,9 @@ uuid = { version = "1.20.0", features = ["v4", "serde"] }
chrono = { version = "0.4.43", features = ["serde"] } chrono = { version = "0.4.43", features = ["serde"] }
json-pointer = "0.3.4" json-pointer = "0.3.4"
indexmap = { version = "2.13.0", features = ["serde"] } indexmap = { version = "2.13.0", features = ["serde"] }
moka = { version = "0.12.14", features = ["sync"] }
xxhash-rust = { version = "0.8.15", features = ["xxh64"] }
dashmap = "6.1.0"
[dev-dependencies] [dev-dependencies]
pgrx-tests = "0.16.1" pgrx-tests = "0.16.1"

229
GEMINI.md
View File

@ -1,159 +1,94 @@
# JSPG: JSON Schema Postgres # JSPG: JSON Schema Postgres
**JSPG** is a high-performance PostgreSQL extension for in-memory JSON Schema validation, specifically targeting **Draft 2020-12**. **JSPG** is a high-performance PostgreSQL extension written in Rust (using `pgrx`) that transforms Postgres into a pre-compiled Semantic Engine. It serves as the core engine for the "Punc" architecture, where the database is the single source of truth for all data models, API contracts, validations, and reactive queries.
It is designed to serve as the validation engine for the "Punc" architecture, where the database is the single source of truth for all data models and API contracts. ## 1. Overview & Architecture
## 🎯 Goals JSPG operates by deeply integrating the JSON Schema Draft 2020-12 specification directly into the Postgres session lifecycle. It is built around three core pillars:
* **Validator**: In-memory, near-instant JSON structural validation and type polymorphism routing.
* **Merger**: Automatically traverse and UPSERT deeply nested JSON graphs into normalized relational tables.
* **Queryer**: Compile JSON Schemas into static, cached SQL SPI `SELECT` plans for fetching full entities or isolated "Stems".
### 🎯 Goals
1. **Draft 2020-12 Compliance**: Attempt to adhere to the official JSON Schema Draft 2020-12 specification. 1. **Draft 2020-12 Compliance**: Attempt to adhere to the official JSON Schema Draft 2020-12 specification.
2. **Ultra-Fast Validation**: Compile schemas into an optimized in-memory representation for near-instant validation during high-throughput workloads. 2. **Ultra-Fast Execution**: Compile schemas into optimized in-memory validation trees and cached SQL SPIs to bypass Postgres Query Builder overheads.
3. **Connection-Bound Caching**: Leverage the PostgreSQL session lifecycle to maintain a per-connection schema cache, eliminating the need for repetitive parsing. 3. **Connection-Bound Caching**: Leverage the PostgreSQL session lifecycle using an **Atomic Swap** pattern. Schemas are 100% frozen, completely eliminating locks during read access.
4. **Structural Inheritance**: Support object-oriented schema design via Implicit Keyword Shadowing and virtual `$family` references. 4. **Structural Inheritance**: Support object-oriented schema design via Implicit Keyword Shadowing and virtual `$family` references natively mapped to Postgres table constraints.
5. **Punc Integration**: validation is aware of the "Punc" context (request/response) and can validate `cue` objects efficiently. 5. **Reactive Beats**: Provide natively generated "Stems" (isolated payload fragments) for dynamic websocket reactivity.
## 🔌 API Reference
The extension exposes the following functions to PostgreSQL:
### `cache_json_schemas(enums jsonb, types jsonb, puncs jsonb) -> jsonb`
Loads and compiles the entire schema registry into the session's memory, atomically replacing the previous validator.
* **Inputs**:
* `enums`: Array of enum definitions.
* `types`: Array of type definitions (core entities).
* `puncs`: Array of punc (function) definitions with request/response schemas.
* **Behavior**:
* Parses all inputs into an internal schema graph.
* Resolves all internal references (`$ref`).
* Generates virtual union schemas for type hierarchies referenced via `$family`.
* Compiles schemas into validators.
* **Returns**: `{"response": "success"}` or an error object.
### `mask_json_schema(schema_id text, instance jsonb) -> jsonb`
Validates a JSON instance and returns a new JSON object with unknown properties removed (pruned) based on the schema.
* **Inputs**:
* `schema_id`: The `$id` of the schema to mask against.
* `instance`: The JSON data to mask.
* **Returns**:
* On success: A `Drop` containing the **masked data**.
* On failure: A `Drop` containing validation errors.
### `validate_json_schema(schema_id text, instance jsonb) -> jsonb`
Validates a JSON instance against a pre-compiled schema.
* **Inputs**:
* `schema_id`: The `$id` of the schema to validate against (e.g., `person`, `save_person.request`).
* `instance`: The JSON data to validate.
* **Returns**:
* On success: `{"response": "success"}`
* On failure: A JSON object containing structured errors (e.g., `{"errors": [...]}`).
### `json_schema_cached(schema_id text) -> bool`
Checks if a specific schema ID is currently present in the cache.
### `clear_json_schemas() -> jsonb`
Clears the current session's schema cache, freeing memory.
### `show_json_schemas() -> jsonb`
Returns a debug dump of the currently cached schemas (for development/debugging).
## ✨ Custom Features & Deviations
JSPG implements specific extensions to the Draft 2020-12 standard to support the Punc architecture's object-oriented needs while heavily optimizing for zero-runtime lookups.
### 1. Polymorphism & Referencing (`$ref`, `$family`, and Native Types)
JSPG replaces the complex, dynamic reference resolution logic of standard JSON Schema (e.g., `$defs`, relative URIs, `$dynamicRef`, `$dynamicAnchor`, `anyOf`) with a strict, explicitly structured global `$id` system. This powers predictable code generation and blazing-fast runtime validation.
#### A. Global `$id` Conventions & Schema Buckets
Every schema is part of a flat, globally addressable namespace. However, where a schema is defined in the database determines its physical boundaries:
* **Types (Entities)**: Schemas defined within a Postgres `type` represent entities. The `$id` must be exactly the type name (`person`) or suffixed (`full.person`). All schemas in this bucket receive strict Native Type Discrimination based on the physical table hierarchy.
* **Puncs (APIs)**: Schemas defined within a `punc` are ad-hoc containers. The `$id` must be exactly `[punc_name].request` or `[punc_name].response`. They are never entities themselves.
* **Enums (Domains)**: Schemas defined within an `enum` represent enum definitions. The `$id` must be exactly the enum name (`job_status`) or suffixed (`short.job_status`).
#### B. Native Type Discrimination (The `variations` Property)
Because `jspg` knows which schemas are Entities based on their origin bucket (Types), it securely and implicitly manages the `"type"` property by attaching `compiled_variations`.
If a schema originates in the `user` bucket, the validator does *not* rigidly require `{"type": "user"}`. Instead, it queries the physical Postgres type inheritance graph (e.g. `[entity, organization, user]`) and allows the JSON to be `{"type": "person"}` or `{"type": "bot"}` automatically, enabling seamless API polymorphism.
#### C. Structural Inheritance & Viral Infection (`$ref`)
`$ref` is used exclusively for structural inheritance.
* **Viral Infection**: If an anonymous schema or an ad-hoc schema (like a Punc Request) `$ref`s a strict Entity schema (like `person.light`), it *virally inherits* the `compiled_variations` of that target. This means a Punc request instantly gains the exact polymorphic security boundaries of the Entity it points to.
* **`$ref` never creates a Union.** When you use `$ref`, you are asking for a single, concrete struct/shape.
#### D. Shape Polymorphism & Virtual Unions (`$family`)
To support polymorphic API contracts (e.g., heterogeneous arrays of generic widgets) without manually writing massive `oneOf` blocks, JSPG provides the `$family` macro.
While `$ref` defines rigid structure, `$family` relies on an abstract **Descendants Graph**.
During compilation, `jspg` temporarily tracks every `$ref` pointer globally to build a reverse-lookup graph of "Descendants". It also calculates the **Inheritance Depth** of every schema (how far removed it is from the root entity).
When `{"$family": "widget"}` is encountered, JSPG:
1. Locates the `widget` schema in the Descendants graph.
2. Expands the macro by finding *every* schema in the entire database that structurally `$ref`s `widget`, directly or indirectly (e.g., `stock.widget`, an anonymous object, etc.).
3. Evaluates the incoming JSON against **every** descendant schema in that family *strictly*.
If you request `{"$family": "light.widget"}`, it simply evaluates all schemas that `$ref` the generic abstract `light.widget` interface.
#### E. Strict Matches & The Depth Heuristic
JSPG strictly enforces that polymorphic structures (`oneOf`, or a `$family` expansion) match **exactly one** valid schema permutation. It does not support fuzzy matching (`anyOf`).
If a JSON payload matches more than one schema in a union (which happens frequently due to implicit inheritance where an object might technically satisfy the requirements of both `entity` and `user`), JSPG automatically applies the **Depth Heuristic Tie-Breaker**:
* It looks up the pre-calculated Inheritance Depth for all valid passing candidates.
* It selects the candidate that is **deepest** in the inheritance tree (the most explicitly defined descendant).
* If multiple passing candidates tie at the exact same depth level, an `AMBIGUOUS` error is thrown, forcing the developer to supply a more precise type discriminator or payload.
This cleanly separates **Database Physics** (derived from the Postgres `Types` bucket and viral `$ref` inheritance) from **Structural Polymorphism** (derived purely from the abstract `$ref` tree).
### 2. Strict by Default & Extensibility
JSPG enforces a "Secure by Default" philosophy. All schemas are treated as if `unevaluatedProperties: false` (and `unevaluatedItems: false`) is set, unless explicitly overridden.
* **Strictness**: By default, any property or array item in the instance data that is not explicitly defined in the schema causes a validation error. This prevents clients from sending undeclared fields or extra array elements.
* **Extensibility (`extensible: true`)**: To allow a free-for-all of additional, undefined properties or extra array items, you must add `"extensible": true` to the schema. This globally disables the strictness check for that object or array, useful for types designed to be completely open.
* **Structured Additional Properties (`additionalProperties: {...}`)**: Instead of a boolean free-for-all, you can define `additionalProperties` as a schema object (e.g., `{"type": "string"}`). This maintains strictness (no arbitrary keys) but allows any extra keys as long as their values match the defined structure.
* **Ref Boundaries**: Strictness is reset when crossing `$ref` boundaries. The referenced schema's strictness is determined by its own definition (strict by default unless `extensible: true`), ignoring the caller's state.
* **Inheritance**: Strictness is inherited. A schema extending a strict parent will also be strict unless it declares itself `extensible: true`. Conversely, a schema extending a loose parent will also be loose unless it declares itself `extensible: false`.
### 3. Implicit Keyword Shadowing
Standard JSON Schema composition (`allOf`) is additive (Intersection), meaning constraints can only be tightened, not replaced. However, JSPG treats `$ref` differently when it appears alongside other properties to support object-oriented inheritance.
* **Inheritance (`$ref` + properties)**: When a schema uses `$ref` and defines its own properties, JSPG implements Smart Merge (or Shadowing). If a property is defined in the current schema, its constraints take precedence over the inherited constraints for that specific keyword.
* **Example**: If Entity defines `type: { const: "entity" }` and Person (which refs Entity) defines `type: { const: "person" }`, validation passes for "person". The local const shadows the inherited const.
* **Granularity**: Shadowing is per-keyword. If Entity defined `type: { const: "entity", minLength: 5 }`, Person would shadow `const` but still inherit `minLength: 5`.
* **Composition (`allOf`)**: When using `allOf`, standard intersection rules apply. No shadowing occurs; all constraints from all branches must pass. This is used for mixins or interfaces.
### 4. Format Leniency for Empty Strings
To simplify frontend form logic, the format validators for `uuid`, `date-time`, and `email` explicitly allow empty strings (`""`). This treats an empty string as "present but unset" rather than "invalid format".
## 🏗️ Architecture
The extension is written in Rust using `pgrx` and structures its schema parser to mirror the Punc Generator's design:
* **Single `Schema` Struct**: A unified struct representing the exact layout of a JSON Schema object, including standard keywords and custom vocabularies (`form`, `display`, etc.).
* **Compiler Phase**: schema JSONs are parsed into this struct, linked (references resolved), and then compiled into an efficient validation tree.
* **Validation Phase**: The compiled validators traverse the JSON instance using `serde_json::Value`.
### Concurrency & Threading ("Immutable Graphs") ### Concurrency & Threading ("Immutable Graphs")
To support high-throughput operations while allowing for runtime updates (e.g., during hot-reloading), JSPG uses an **Atomic Swap** pattern:
To support high-throughput validation while allowing for runtime schema updates (e.g., during development or hot-reloading), JSPG uses an **Atomic Swap** pattern based on 100% immutable schemas.
1. **Parser Phase**: Schema JSONs are parsed into ordered `Schema` structs. 1. **Parser Phase**: Schema JSONs are parsed into ordered `Schema` structs.
2. **Compiler Phase**: The database iterates all parsed schemas and pre-computes native optimization maps: 2. **Compiler Phase**: The database iterates all parsed schemas and pre-computes native optimization maps (Descendants Map, Depths Map, Variations Map).
* **Descendants Map**: A reverse `$ref` lookup graph for instant `$family` resolution. 3. **Immutable Validator**: The `Validator` struct immutably owns the `Database` registry and all its global maps. Schemas themselves are completely frozen; `$ref` strings are resolved dynamically at runtime using pre-computed O(1) maps.
* **Depths Map**: The `$ref` lineage distance of every schema for heuristic tie-breaking. 4. **Lock-Free Reads**: Incoming operations acquire a read lock just long enough to clone the `Arc` inside an `RwLock<Option<Arc<Validator>>>`, ensuring zero blocking during schema updates.
* **Variations Map**: The Native Type inheritance hierarchy.
3. **Immutable Validator**: The `Validator` struct immutably owns the `Database` registry and all its global maps. Once created, a validator instance (and its registry) never changes. Schemas themselves are completely frozen; `$ref` strings are resolved dynamically at runtime using the pre-computed O(1) maps, eliminating the need to physically mutate or link pointers across structures.
4. **Global Pointer**: A global `RwLock<Option<Arc<Validator>>>` holds the current active validator.
5. **Lock-Free Reads**: Validation requests acquire a read lock just long enough to clone the `Arc` (incrementing a reference count), then release the lock immediately. Validation proceeds on the snapshot, ensuring no blocking during schema updates.
6. **Atomic Updates**: When schemas are reloaded (`cache_json_schemas`), a new `Registry` and `Validator` are built entirely on the stack. The global pointer is then atomically swapped to the new instance under a write lock.
## 🧪 Testing ---
Testing is driven by standard Rust unit tests that load JSON fixtures. ## 2. Validator
* **Isolation**: Each test file runs with its own isolated `Registry` and `Validator` instance, created on the stack. This eliminates global state interference and allows tests to run in parallel. The Validator provides strict, schema-driven evaluation for the "Punc" architecture.
* **Fixtures**: The tests are located in `tests/fixtures/*.json` and are executed via `cargo test`.
### API Reference
* `jspg_setup(database jsonb) -> jsonb`: Loads and compiles the entire registry (types, enums, puncs, relations) atomically.
* `mask_json_schema(schema_id text, instance jsonb) -> jsonb`: Validates and prunes unknown properties dynamically, returning masked data.
* `jspg_validate(schema_id text, instance jsonb) -> jsonb`: Returns boolean-like success or structured errors.
* `jspg_teardown() -> jsonb`: Clears the current session's schema cache.
### Custom Features & Deviations
JSPG implements specific extensions to the Draft 2020-12 standard to support the Punc architecture's object-oriented needs while heavily optimizing for zero-runtime lookups.
#### A. Polymorphism & Referencing (`$ref`, `$family`, and Native Types)
* **Native Type Discrimination (`variations`)**: Schemas defined inside a Postgres `type` are Entities. The validator securely and implicitly manages their `"type"` property. If an entity inherits from `user`, incoming JSON can safely define `{"type": "person"}` without errors, thanks to `compiled_variations` inheritance.
* **Structural Inheritance & Viral Infection (`$ref`)**: `$ref` is used exclusively for structural inheritance, *never* for union creation. A Punc request schema that `$ref`s an Entity virally inherits all physical database polymorphism rules for that target.
* **Shape Polymorphism (`$family`)**: Auto-expands polymorphic API lists based on an abstract Descendants Graph. If `{"$family": "widget"}` is used, JSPG evaluates the JSON against every schema that `$ref`s widget.
* **Strict Matches & Depth Heuristic**: Polymorphic structures MUST match exactly **one** schema permutation. If multiple inherited struct permutations pass, JSPG applies the **Depth Heuristic Tie-Breaker**, selecting the candidate deepest in the inheritance tree.
#### B. Strict by Default & Extensibility
* **Strictness**: By default, any property not explicitly defined in the schema causes a validation error (effectively enforcing `additionalProperties: false` globally).
* **Extensibility (`extensible: true`)**: To allow a free-for-all of undefined properties, schemas must explicitly declare `"extensible": true`.
* **Structured Additional Properties**: If `additionalProperties: {...}` is defined as a schema, arbitrary keys are allowed so long as their values match the defined type constraint.
* **Inheritance Boundaries**: Strictness resets when crossing `$ref` boundaries. A schema extending a strict parent remains strict unless it explicitly overrides with `"extensible": true`.
#### C. Implicit Keyword Shadowing
* **Inheritance (`$ref` + properties)**: Unlike standard JSON Schema, when a schema uses `$ref` alongside local properties, JSPG implements **Smart Merge**. Local constraints natively take precedence over (shadow) inherited constraints for the same keyword.
* *Example*: If `entity` has `type: {const: "entity"}`, but `person` defines `type: {const: "person"}`, the local `person` const cleanly overrides the inherited one.
* **Composition (`allOf`)**: When evaluating `allOf`, standard intersection rules apply seamlessly. No shadowing occurs, meaning all constraints from all branches must pass.
#### D. Format Leniency for Empty Strings
To simplify frontend form validation, format validators specifically for `uuid`, `date-time`, and `email` explicitly allow empty strings (`""`), treating them as "present but unset".
---
## 3. Merger
The Merger provides an automated, high-performance graph synchronization engine via the `jspg_merge(cue JSONB)` API. It orchestrates the complex mapping of nested JSON objects into normalized Postgres relational tables, honoring all inheritance and graph constraints.
### Core Features
* **Deep Graph Merging**: The Merger walks arbitrary levels of deeply nested JSON schemas (e.g. tracking an `order`, its `customer`, and an array of its `lines`). It intelligently discovers the correct parent-to-child or child-to-parent Foreign Keys stored in the registry and automatically maps the UUIDs across the relationships during UPSERT.
* **Prefix Foreign Key Matching**: Handles scenarios where multiple relations point to the same table by using database Foreign Key constraint prefixes (`fk_`). For example, if a schema has `shipping_address` and `billing_address`, the merger resolves against `fk_shipping_address_entity` vs `fk_billing_address_entity` automatically to correctly route object properties.
* **Dynamic Deduplication & Lookups**: If a nested object is provided without an `id`, the Merger utilizes Postgres `lk_` index constraints defined in the schema registry (e.g. `lk_person` mapped to `first_name` and `last_name`). It dynamically queries these unique matching constraints to discover the correct UUID to perform an UPDATE, preventing data duplication.
* **Hierarchical Table Inheritance**: The Punc system uses distributed table inheritance (e.g. `person` inherits `user` inherits `organization` inherits `entity`). The Merger splits the incoming JSON payload and performs atomic row updates across *all* relevant tables in the lineage map.
* **The Archive Paradigm**: Data is never deleted in the Punc system. The Merger securely enforces referential integrity by toggling the `archived` Boolean flag on the base `entity` table rather than issuing SQL `DELETE` commands.
* **Change Tracking & Reactivity**: The Merger diffs the incoming JSON against the existing database row (utilizing static, `DashMap`-cached `lk_` SELECT string templates). Every detected change is recorded into the `agreego.change` audit table, tracking the user mapping. It then natively uses `pg_notify` to broadcast a completely flat row-level diff out to the Go WebSocket server for O(1) routing.
* **Many-to-Many Graph Edge Management**: Operates seamlessly with the global `agreego.relationship` table, allowing the system to represent and merge arbitrary reified M:M relationships directionally between any two entities.
* **Sparse Updates**: Empty JSON strings `""` are directly bound as explicit SQL `NULL` directives to clear data, whilst omitted (missing) properties skip UPDATE execution entirely, ensuring partial UI submissions do not wipe out sibling fields.
* **Unified Return Structure**: To eliminate UI hydration race conditions and multi-user duplication, `jspg_merge` explicitly strips the response graph and returns only the root `{ "id": "uuid" }` (or an array of IDs for list insertions). External APIs can then explicitly call read APIs to fetch the resulting graph, while the UI relies 100% implicitly on the flat `pg_notify` pipeline for reactive state synchronization.
* **Decoupled SQL Generation**: Because Writes (INSERT/UPDATE) are inherently highly dynamic based on partial payload structures, the Merger generates raw SQL strings dynamically per execution without caching, guaranteeing a minimal memory footprint while scaling optimally.
---
## 4. Queryer
The Queryer transforms Postgres into a pre-compiled Semantic Query Engine via the `jspg_query(schema_id text, cue jsonb)` API, designed to serve the exact shape of Punc responses directly via SQL.
### Core Features
* **Schema-to-SQL Compilation**: Compiles JSON Schema ASTs spanning deep arrays directly into static, pre-planned SQL multi-JOIN queries.
* **DashMap SQL Caching**: Executes compiled SQL via Postgres SPI execution, securely caching the static string compilation templates per schema permutation inside the `GLOBAL_JSPG` application memory, drastically reducing repetitive schema crawling.
* **Dynamic Filtering**: Binds parameters natively through `cue.filters` objects. Dynamically handles string formatting (e.g. parsing `uuid` or formatting date-times) and safely escapes complex combinations utilizing `ILIKE` operations correctly mapped to the originating structural table.
* **The Stem Engine**: Rather than over-fetching heavy Entity payloads and trimming them, Punc Framework Websockets depend on isolated subgraphs defined as **Stems**.
* During initialization, the generator auto-discovers graph boundaries (Stems) inside the schema tree.
* The Queryer prepares dedicated SQL execution templates tailored precisely for that exact `Stem` path (e.g. executing `get_dashboard` queried specifically for the `/owner` stem).
* These Stem outputs instantly hydrate targeted Go Bitsets, providing `O(1)` real-time routing for fractional data payloads without any application-layer overhead.

58
migrate_fixtures.js Normal file
View File

@ -0,0 +1,58 @@
const fs = require('fs');
const path = require('path');
const fixturesDir = path.join(__dirname, 'tests', 'fixtures');
function processFile(filePath) {
const content = fs.readFileSync(filePath, 'utf8');
let data;
try {
data = JSON.parse(content);
} catch (e) {
console.error(`Skipping ${filePath} due to parse error`);
return;
}
let modified = false;
data.forEach(suite => {
if (suite.tests) {
suite.tests.forEach(test => {
if (test.valid !== undefined || test.expect_errors !== undefined) {
if (!test.expect) {
test.expect = {};
}
if (test.valid !== undefined) {
test.expect.success = test.valid;
delete test.valid;
}
if (test.expect_errors !== undefined) {
test.expect.errors = test.expect_errors;
delete test.expect_errors;
}
modified = true;
}
});
}
});
if (modified) {
fs.writeFileSync(filePath, JSON.stringify(data, null, 4));
console.log(`Migrated ${filePath}`);
}
}
function walkDir(dir) {
const files = fs.readdirSync(dir);
files.forEach(file => {
const fullPath = path.join(dir, file);
if (fs.statSync(fullPath).isDirectory()) {
walkDir(fullPath);
} else if (fullPath.endsWith('.json')) {
processFile(fullPath);
}
});
}
walkDir(fixturesDir);
console.log('Done migrating fixtures!');

111
src/database/executor.rs Normal file
View File

@ -0,0 +1,111 @@
use pgrx::prelude::*;
use serde_json::Value;
/// An abstraction over database execution to allow for isolated unit testing
/// without a live Postgres SPI connection.
pub trait DatabaseExecutor: Send + Sync {
/// Executes a query expecting a single JSONB return, representing rows.
fn query(&self, sql: &str, args: Option<&[Value]>) -> Result<Value, String>;
/// Executes an operation (INSERT, UPDATE, DELETE, or pg_notify) that does not return rows.
fn execute(&self, sql: &str, args: Option<&[Value]>) -> Result<(), String>;
/// Returns the current authenticated user's ID
fn auth_user_id(&self) -> Result<String, String>;
/// Returns the current transaction timestamp
fn timestamp(&self) -> Result<String, String>;
}
/// The production executor that wraps `pgrx::spi::Spi`.
pub struct SpiExecutor;
impl SpiExecutor {
pub fn new() -> Self {
Self {}
}
}
impl DatabaseExecutor for SpiExecutor {
    /// Runs `sql` via SPI and collects column 1 (expected JSONB) of every
    /// returned row into a JSON array.
    fn query(&self, sql: &str, args: Option<&[Value]>) -> Result<Value, String> {
        // Convert the JSON parameters to SPI datums in a single pass (the
        // previous intermediate Vec<JsonB> added nothing).
        let mut args_with_oid: Vec<pgrx::datum::DatumWithOid> = Vec::new();
        if let Some(params) = args {
            for val in params {
                args_with_oid.push(pgrx::datum::DatumWithOid::from(pgrx::JsonB(val.clone())));
            }
        }
        Spi::connect(|client| {
            // `None` = no row limit. Previously the *argument count* was passed
            // as the SPI row limit, silently truncating result sets to the
            // number of bound parameters (the sibling `auth_user_id` already
            // uses `None` for unlimited rows).
            match client.select(sql, None, &args_with_oid) {
                Ok(tup_table) => {
                    let mut results = Vec::new();
                    for row in tup_table {
                        // Rows whose first column is missing or not JSONB are skipped.
                        if let Ok(Some(jsonb)) = row.get::<pgrx::JsonB>(1) {
                            results.push(jsonb.0);
                        }
                    }
                    Ok(Value::Array(results))
                }
                Err(e) => Err(format!("SPI Query Fetch Failure: {}", e)),
            }
        })
    }

    /// Runs a side-effecting statement (INSERT/UPDATE/DELETE/pg_notify),
    /// discarding any result rows.
    fn execute(&self, sql: &str, args: Option<&[Value]>) -> Result<(), String> {
        let mut args_with_oid: Vec<pgrx::datum::DatumWithOid> = Vec::new();
        if let Some(params) = args {
            for val in params {
                args_with_oid.push(pgrx::datum::DatumWithOid::from(pgrx::JsonB(val.clone())));
            }
        }
        Spi::connect_mut(|client| {
            // `None` = no row limit (see `query` above for rationale).
            match client.update(sql, None, &args_with_oid) {
                Ok(_) => Ok(()),
                Err(e) => Err(format!("SPI Execution Failure: {}", e)),
            }
        })
    }

    /// Reads the `auth.user_id` session setting, falling back to the all-`f`
    /// sentinel UUID when the setting is absent.
    fn auth_user_id(&self) -> Result<String, String> {
        Spi::connect(|client| {
            let mut tup_table = client
                .select(
                    "SELECT COALESCE(current_setting('auth.user_id', true), 'ffffffff-ffff-ffff-ffff-ffffffffffff')",
                    None,
                    &[],
                )
                .map_err(|e| format!("SPI Select Error: {}", e))?;
            let row = tup_table
                .next()
                .ok_or("No user id setting returned from context".to_string())?;
            let user_id: Option<String> = row.get(1).map_err(|e| e.to_string())?;
            user_id.ok_or("Missing user_id".to_string())
        })
    }

    /// Returns `clock_timestamp()` rendered as text. Note this is wall-clock
    /// time, not `transaction_timestamp()` — it advances within a transaction.
    fn timestamp(&self) -> Result<String, String> {
        Spi::connect(|client| {
            let mut tup_table = client
                .select("SELECT clock_timestamp()::text", None, &[])
                .map_err(|e| format!("SPI Select Error: {}", e))?;
            let row = tup_table
                .next()
                .ok_or("No clock timestamp returned".to_string())?;
            let timestamp: Option<String> = row.get(1).map_err(|e| e.to_string())?;
            timestamp.ok_or("Missing timestamp".to_string())
        })
    }
}

View File

@ -1,23 +1,30 @@
pub mod r#enum; pub mod r#enum;
pub mod executor;
pub mod formats; pub mod formats;
pub mod page; pub mod page;
pub mod punc; pub mod punc;
pub mod relation;
pub mod schema; pub mod schema;
pub mod r#type; pub mod r#type;
use crate::database::r#enum::Enum; use crate::database::r#enum::Enum;
use crate::database::punc::Punc; use crate::database::executor::{DatabaseExecutor, SpiExecutor};
use crate::database::punc::{Punc, Stem};
use crate::database::relation::Relation;
use crate::database::schema::Schema; use crate::database::schema::Schema;
use crate::database::r#type::Type; use crate::database::r#type::Type;
use serde_json::Value;
use std::collections::{HashMap, HashSet}; use std::collections::{HashMap, HashSet};
pub struct Database { pub struct Database {
pub enums: HashMap<String, Enum>, pub enums: HashMap<String, Enum>,
pub types: HashMap<String, Type>, pub types: HashMap<String, Type>,
pub puncs: HashMap<String, Punc>, pub puncs: HashMap<String, Punc>,
pub relations: HashMap<String, Relation>,
pub schemas: HashMap<String, Schema>, pub schemas: HashMap<String, Schema>,
pub descendants: HashMap<String, Vec<String>>, pub descendants: HashMap<String, Vec<String>>,
pub depths: HashMap<String, usize>, pub depths: HashMap<String, usize>,
pub executor: Box<dyn DatabaseExecutor + Send + Sync>,
} }
impl Database { impl Database {
@ -25,10 +32,12 @@ impl Database {
let mut db = Self { let mut db = Self {
enums: HashMap::new(), enums: HashMap::new(),
types: HashMap::new(), types: HashMap::new(),
relations: HashMap::new(),
puncs: HashMap::new(), puncs: HashMap::new(),
schemas: HashMap::new(), schemas: HashMap::new(),
descendants: HashMap::new(), descendants: HashMap::new(),
depths: HashMap::new(), depths: HashMap::new(),
executor: Box::new(SpiExecutor::new()),
}; };
if let Some(arr) = val.get("enums").and_then(|v| v.as_array()) { if let Some(arr) = val.get("enums").and_then(|v| v.as_array()) {
@ -47,6 +56,14 @@ impl Database {
} }
} }
if let Some(arr) = val.get("relations").and_then(|v| v.as_array()) {
for item in arr {
if let Ok(def) = serde_json::from_value::<Relation>(item.clone()) {
db.relations.insert(def.constraint.clone(), def);
}
}
}
if let Some(arr) = val.get("puncs").and_then(|v| v.as_array()) { if let Some(arr) = val.get("puncs").and_then(|v| v.as_array()) {
for item in arr { for item in arr {
if let Ok(def) = serde_json::from_value::<Punc>(item.clone()) { if let Ok(def) = serde_json::from_value::<Punc>(item.clone()) {
@ -73,12 +90,39 @@ impl Database {
db db
} }
/// Override the default executor for unit testing
pub fn with_executor(mut self, executor: Box<dyn DatabaseExecutor + Send + Sync>) -> Self {
self.executor = executor;
self
}
/// Executes a query expecting a single JSONB array return, representing rows.
pub fn query(&self, sql: &str, args: Option<&[Value]>) -> Result<Value, String> {
self.executor.query(sql, args)
}
/// Executes an operation (INSERT, UPDATE, DELETE, or pg_notify) that does not return rows.
pub fn execute(&self, sql: &str, args: Option<&[Value]>) -> Result<(), String> {
self.executor.execute(sql, args)
}
/// Returns the current authenticated user's ID
pub fn auth_user_id(&self) -> Result<String, String> {
self.executor.auth_user_id()
}
/// Returns the current transaction timestamp
pub fn timestamp(&self) -> Result<String, String> {
self.executor.timestamp()
}
/// Organizes the graph of the database, compiling regex, format functions, and caching relationships. /// Organizes the graph of the database, compiling regex, format functions, and caching relationships.
fn compile(&mut self) -> Result<(), String> { fn compile(&mut self) -> Result<(), String> {
self.collect_schemas(); self.collect_schemas();
self.collect_depths(); self.collect_depths();
self.collect_descendants(); self.collect_descendants();
self.compile_schemas(); self.compile_schemas();
self.collect_stems();
Ok(()) Ok(())
} }
@ -184,4 +228,154 @@ impl Database {
} }
} }
} }
fn collect_stems(&mut self) {
let mut st_map: HashMap<String, Vec<Stem>> = HashMap::new();
for (name, _) in &self.puncs {
let mut stems = Vec::new();
let response_id = format!("{}.response", name);
if let Some(resp_schema) = self.schemas.get(&response_id) {
Self::discover_stems(
&self.types,
&self.schemas,
&self.relations,
&response_id,
resp_schema,
String::from(""),
None,
None,
&mut stems,
);
}
st_map.insert(name.clone(), stems);
}
for (name, stems) in st_map {
if let Some(p) = self.puncs.get_mut(&name) {
p.stems = stems;
}
}
}
/// Recursively walks a response `Schema`, recording a `Stem` for every node
/// that resolves (through a chain of `$ref` indirections) to a registered
/// entity `Type`.
///
/// `current_path` accumulates the slash-separated JSON path to this node;
/// `parent_type` is the nearest enclosing entity type and `property_name` the
/// key under which this schema was reached — both are used to guess the FK
/// relation column for the stem.
fn discover_stems(
    types: &HashMap<String, Type>,
    schemas: &HashMap<String, Schema>,
    relations: &HashMap<String, Relation>,
    _schema_id: &str,
    schema: &Schema,
    current_path: String,
    parent_type: Option<String>,
    property_name: Option<String>,
    stems: &mut Vec<Stem>,
) {
    let mut is_entity = false;
    let mut entity_type = String::new();
    // Check if this schema resolves to an Entity: follow $ref hops until we
    // hit a registered Type (entity) or a dead end.
    let mut current_ref = schema.obj.r#ref.clone();
    let mut depth = 0;
    while let Some(r) = current_ref {
        if types.contains_key(&r) {
            is_entity = true;
            entity_type = r.clone();
            break;
        }
        if let Some(s) = schemas.get(&r) {
            current_ref = s.obj.r#ref.clone();
        } else {
            break;
        }
        depth += 1;
        if depth > 20 {
            break;
        } // prevent infinite loop
    }
    if is_entity {
        // A root-level entity is recorded under "/".
        let final_path = if current_path.is_empty() {
            "/".to_string()
        } else {
            current_path.clone()
        };
        let mut relation_col = None;
        if let (Some(pt), Some(prop)) = (&parent_type, &property_name) {
            // Convention: FK column is "<property>_id".
            let expected_col = format!("{}_id", prop);
            let mut found = false;
            // Try to find the exact relation from the database schema
            // (either direction between parent type and the discovered entity).
            for rel in relations.values() {
                if rel.source_type == *pt && rel.destination_type == entity_type {
                    if rel.source_columns.contains(&expected_col) {
                        relation_col = Some(expected_col.clone());
                        found = true;
                        break;
                    }
                } else if rel.source_type == entity_type && rel.destination_type == *pt {
                    if rel.source_columns.contains(&expected_col) {
                        relation_col = Some(expected_col.clone());
                        found = true;
                        break;
                    }
                }
            }
            if !found {
                // Fallback guess if explicit matching fails
                relation_col = Some(expected_col);
            }
        }
        stems.push(Stem {
            path: final_path,
            r#type: entity_type.clone(),
            relation: relation_col,
        });
    }
    // Pass the new parent downwards
    let next_parent = if is_entity {
        Some(entity_type.clone())
    } else {
        parent_type.clone()
    };
    // Recurse into object properties, extending the path with each key.
    if let Some(props) = &schema.obj.properties {
        for (k, v) in props {
            let next_path = format!(
                "{}/{}",
                if current_path.is_empty() {
                    ""
                } else {
                    &current_path
                },
                k
            );
            Self::discover_stems(
                types,
                schemas,
                relations,
                "",
                v,
                next_path,
                next_parent.clone(),
                Some(k.clone()),
                stems,
            );
        }
    }
    // Recurse into array item schemas; the path is NOT extended, so list
    // elements share their list's path.
    if let Some(items) = &schema.obj.items {
        Self::discover_stems(
            types,
            schemas,
            relations,
            "",
            items,
            current_path.clone(),
            next_parent.clone(),
            property_name.clone(),
            stems,
        );
    }
}
} }

View File

@ -2,6 +2,14 @@ use crate::database::page::Page;
use crate::database::schema::Schema; use crate::database::schema::Schema;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
/// A discovered entity reference inside a punc's response schema: where it
/// lives, what entity type it resolves to, and (when determinable) the FK
/// column relating it to its parent.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Stem {
    /// Slash-separated path within the response payload ("/" for the root).
    pub path: String,
    /// Entity type name this path resolves to.
    pub r#type: String,
    /// FK column name on the owning side, when one could be determined.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub relation: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize, Default)] #[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(default)] #[serde(default)]
pub struct Punc { pub struct Punc {
@ -17,4 +25,6 @@ pub struct Punc {
pub page: Option<Page>, pub page: Option<Page>,
#[serde(default)] #[serde(default)]
pub schemas: Vec<Schema>, pub schemas: Vec<Schema>,
#[serde(default)]
pub stems: Vec<Stem>,
} }

12
src/database/relation.rs Normal file
View File

@ -0,0 +1,12 @@
use serde::{Deserialize, Serialize};
/// A foreign-key relationship between two entity types, keyed by its
/// constraint name in the `Database` registry.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(default)]
pub struct Relation {
    /// Constraint name; used as the unique key in `Database::relations`.
    pub constraint: String,
    /// Type that owns the FK columns.
    pub source_type: String,
    /// FK column names on the source side (parallel to `destination_columns`).
    pub source_columns: Vec<String>,
    /// Type the FK points at. The value "entity" marks a polymorphic target.
    pub destination_type: String,
    /// Referenced column names on the destination side.
    pub destination_columns: Vec<String>,
    /// Optional prefix used to disambiguate multiple relations between the
    /// same pair of types.
    pub prefix: Option<String>,
}

View File

@ -23,7 +23,8 @@ pub struct Type {
pub hierarchy: Vec<String>, pub hierarchy: Vec<String>,
#[serde(default)] #[serde(default)]
pub variations: HashSet<String>, pub variations: HashSet<String>,
pub relationship: Option<bool>, #[serde(default)]
pub relationship: bool,
#[serde(default)] #[serde(default)]
pub fields: Vec<String>, pub fields: Vec<String>,
pub grouped_fields: Option<Value>, pub grouped_fields: Option<Value>,

View File

@ -16,8 +16,8 @@ impl Jspg {
let database_instance = Database::new(database_val); let database_instance = Database::new(database_val);
let database = Arc::new(database_instance); let database = Arc::new(database_instance);
let validator = Validator::new(database.clone()); let validator = Validator::new(database.clone());
let queryer = Queryer::new(); let queryer = Queryer::new(database.clone());
let merger = Merger::new(); let merger = Merger::new(database.clone());
Self { Self {
database, database,

View File

@ -9,7 +9,6 @@ pub mod merger;
pub mod queryer; pub mod queryer;
pub mod validator; pub mod validator;
use serde_json::json;
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
lazy_static::lazy_static! { lazy_static::lazy_static! {
@ -22,7 +21,7 @@ lazy_static::lazy_static! {
} }
#[pg_extern(strict)] #[pg_extern(strict)]
pub fn jspg_cache_database(database: JsonB) -> JsonB { pub fn jspg_setup(database: JsonB) -> JsonB {
let new_jspg = crate::jspg::Jspg::new(&database.0); let new_jspg = crate::jspg::Jspg::new(&database.0);
let new_arc = Arc::new(new_jspg); let new_arc = Arc::new(new_jspg);
@ -35,10 +34,85 @@ pub fn jspg_cache_database(database: JsonB) -> JsonB {
let drop = crate::drop::Drop::success(); let drop = crate::drop::Drop::success();
JsonB(serde_json::to_value(drop).unwrap()) JsonB(serde_json::to_value(drop).unwrap())
} }
/// SQL-callable entrypoint that merges a JSON payload through the cached engine.
/// Returns either the merge result or a `Drop` describing the failure.
#[pg_extern]
pub fn jspg_merge(data: JsonB) -> JsonB {
    // Clone the engine Arc out from under a short-lived read lock.
    let engine = match GLOBAL_JSPG.read().unwrap().clone() {
        Some(engine) => engine,
        None => {
            // Engine was never initialized — report rather than panic.
            let error = crate::drop::Error {
                code: "VALIDATOR_NOT_INITIALIZED".to_string(),
                message: "The JSPG database has not been cached yet. Run jspg_setup()".to_string(),
                details: crate::drop::ErrorDetails {
                    path: "".to_string(),
                },
            };
            let drop = crate::drop::Drop::with_errors(vec![error]);
            return JsonB(serde_json::to_value(drop).unwrap());
        }
    };

    match engine.merger.merge(data.0) {
        Ok(result) => JsonB(result),
        Err(e) => {
            let error = crate::drop::Error {
                code: "MERGE_FAILED".to_string(),
                message: e,
                details: crate::drop::ErrorDetails {
                    path: "".to_string(),
                },
            };
            let drop = crate::drop::Drop::with_errors(vec![error]);
            JsonB(serde_json::to_value(drop).unwrap())
        }
    }
}
#[pg_extern]
pub fn jspg_query(schema_id: &str, stem: Option<&str>, filters: Option<JsonB>) -> JsonB {
let engine_opt = {
let lock = GLOBAL_JSPG.read().unwrap();
lock.clone()
};
match engine_opt {
Some(engine) => match engine
.queryer
.query(schema_id, stem, filters.as_ref().map(|f| &f.0))
{
Ok(res) => JsonB(res),
Err(e) => {
let error = crate::drop::Error {
code: "QUERY_FAILED".to_string(),
message: e,
details: crate::drop::ErrorDetails {
path: schema_id.to_string(),
},
};
JsonB(serde_json::to_value(crate::drop::Drop::with_errors(vec![error])).unwrap())
}
},
None => {
let error = crate::drop::Error {
code: "ENGINE_NOT_INITIALIZED".to_string(),
message: "JSPG extension has not been initialized via jspg_setup".to_string(),
details: crate::drop::ErrorDetails {
path: "".to_string(),
},
};
JsonB(serde_json::to_value(crate::drop::Drop::with_errors(vec![error])).unwrap())
}
}
}
// `mask_json_schema` has been removed as the mask architecture is fully replaced by Spi string queries during DB interactions. // `mask_json_schema` has been removed as the mask architecture is fully replaced by Spi string queries during DB interactions.
#[pg_extern(strict, parallel_safe)] #[pg_extern(strict, parallel_safe)]
pub fn validate_json_schema(schema_id: &str, instance: JsonB) -> JsonB { pub fn jspg_validate(schema_id: &str, instance: JsonB) -> JsonB {
// 1. Acquire Snapshot // 1. Acquire Snapshot
let jspg_arc = { let jspg_arc = {
let lock = GLOBAL_JSPG.read().unwrap(); let lock = GLOBAL_JSPG.read().unwrap();
@ -79,7 +153,7 @@ pub fn validate_json_schema(schema_id: &str, instance: JsonB) -> JsonB {
} else { } else {
let error = crate::drop::Error { let error = crate::drop::Error {
code: "VALIDATOR_NOT_INITIALIZED".to_string(), code: "VALIDATOR_NOT_INITIALIZED".to_string(),
message: "The JSPG database has not been cached yet. Run jspg_cache_database()".to_string(), message: "The JSPG database has not been cached yet. Run jspg_setup()".to_string(),
details: crate::drop::ErrorDetails { details: crate::drop::ErrorDetails {
path: "".to_string(), path: "".to_string(),
}, },
@ -89,42 +163,33 @@ pub fn validate_json_schema(schema_id: &str, instance: JsonB) -> JsonB {
} }
} }
#[pg_extern(strict, parallel_safe)] #[pg_extern]
pub fn json_schema_cached(schema_id: &str) -> bool { pub fn jspg_get_punc_stems(punc_name: &str) -> JsonB {
if let Some(engine) = GLOBAL_JSPG.read().unwrap().as_ref() { let engine_opt = {
match engine let lock = GLOBAL_JSPG.read().unwrap();
.validator lock.clone()
.validate(schema_id, &serde_json::Value::Null) };
{
Err(e) if e.code == "SCHEMA_NOT_FOUND" => false, match engine_opt {
_ => true, Some(engine) => {
if let Some(punc) = engine.database.puncs.get(punc_name) {
JsonB(serde_json::to_value(&punc.stems).unwrap_or(serde_json::Value::Array(vec![])))
} else {
JsonB(serde_json::Value::Array(vec![]))
}
} }
} else { None => JsonB(serde_json::Value::Array(vec![])),
false
} }
} }
#[pg_extern(strict)] #[pg_extern(strict)]
pub fn clear_json_schemas() -> JsonB { pub fn jspg_teardown() -> JsonB {
let mut lock = GLOBAL_JSPG.write().unwrap(); let mut lock = GLOBAL_JSPG.write().unwrap();
*lock = None; *lock = None;
let drop = crate::drop::Drop::success(); let drop = crate::drop::Drop::success();
JsonB(serde_json::to_value(drop).unwrap()) JsonB(serde_json::to_value(drop).unwrap())
} }
#[pg_extern(strict, parallel_safe)]
pub fn show_json_schemas() -> JsonB {
if let Some(engine) = GLOBAL_JSPG.read().unwrap().as_ref() {
let mut keys = engine.validator.get_schema_ids();
keys.sort();
let drop = crate::drop::Drop::success_with_val(json!(keys));
JsonB(serde_json::to_value(drop).unwrap())
} else {
let drop = crate::drop::Drop::success_with_val(json!([]));
JsonB(serde_json::to_value(drop).unwrap())
}
}
#[cfg(any(test, feature = "pg_test"))] #[cfg(any(test, feature = "pg_test"))]
#[pg_schema] #[pg_schema]
mod tests { mod tests {

24
src/merger/cache.rs Normal file
View File

@ -0,0 +1,24 @@
use dashmap::DashMap;
/// A concurrent key → SQL-string cache backed by `DashMap`.
pub struct StatementCache {
    /// Maps a Cache Key (String) -> SQL String (String)
    statements: DashMap<String, String>,
}

impl StatementCache {
    /// Builds an empty cache. The capacity argument is currently unused
    /// (DashMap grows on demand) but kept for interface stability.
    pub fn new(_max_capacity: u64) -> Self {
        StatementCache {
            statements: DashMap::new(),
        }
    }

    /// Retrieve an existing statement name by key, or None if it missed
    pub fn get(&self, key: &str) -> Option<String> {
        self.statements.get(key).map(|entry| entry.value().clone())
    }

    /// Insert a completely verified/compiled statement string into the cache
    pub fn insert(&self, key: String, sql: String) {
        self.statements.insert(key, sql);
    }
}

View File

@ -1,15 +1,737 @@
pub struct Merger { //! The `merger` module handles executing Postgres SPI directives dynamically based on JSON payloads
// To be implemented //! using the structurally isolated schema rules provided by the `Database` registry.
}
impl Default for Merger { pub mod cache;
fn default() -> Self {
Self::new() use crate::database::Database;
}
use serde_json::Value;
use std::sync::Arc;
pub struct Merger {
pub db: Arc<Database>,
pub cache: cache::StatementCache,
} }
impl Merger { impl Merger {
pub fn new() -> Self { pub fn new(db: Arc<Database>) -> Self {
Self {} Self {
db,
cache: cache::StatementCache::new(10_000),
}
}
/// Primary recursive entrypoint that separates Array lists from Object branches.
/// After merging, each resolved entity is projected down to just its `id`
/// before being returned to the caller.
pub fn merge(&self, data: Value) -> Result<Value, String> {
    let merged = match data {
        Value::Array(items) => self.merge_array(items)?,
        Value::Object(map) => self.merge_object(map)?,
        // Nulls, Strings, Bools, Numbers at root are invalid merge payloads
        _ => return Err("Invalid merge payload: root must be an Object or Array".to_string()),
    };

    // Projection: keep only the `id` key of an object; anything else → null.
    let project_id = |value: Value| -> Value {
        match value {
            Value::Object(mut map) => {
                let mut out = serde_json::Map::new();
                if let Some(id) = map.remove("id") {
                    out.insert("id".to_string(), id);
                }
                Value::Object(out)
            }
            _ => Value::Null,
        }
    };

    Ok(match merged {
        obj @ Value::Object(_) => project_id(obj),
        Value::Array(arr) => Value::Array(arr.into_iter().map(project_id).collect()),
        other => other,
    })
}
/// Handles mapping over an array of entities, executing merge logic on each
/// and returning the resolved list. Short-circuits on the first error.
fn merge_array(&self, items: Vec<Value>) -> Result<Value, String> {
    items
        .into_iter()
        .map(|item| self.merge(item))
        .collect::<Result<Vec<_>, String>>()
        .map(Value::Array)
}
/// Core processing algorithm for a single Entity Object dictionary.
fn merge_object(&self, mut obj: serde_json::Map<String, Value>) -> Result<Value, String> {
    // Step 1: Ensure it has a `type` definition to proceed
    let type_name = obj
        .get("type")
        .and_then(|v| v.as_str())
        .ok_or_else(|| "Missing required 'type' field on object".to_string())?
        .to_string();

    // Step 2: Extract Type mapping from the Engine
    let type_def = self
        .db
        .types
        .get(&type_name)
        .ok_or_else(|| format!("Unknown entity type: {}", type_name))?;

    // Steps 3 & 4: staging order depends on the `relationship` flag.
    if type_def.relationship {
        // Relationships: process children FIRST (Post-Staging)
        self.process_children(&mut obj, type_def)?;
        Ok(Value::Object(self.stage_entity(obj)?))
    } else {
        // Entities: process core FIRST (Pre-Staging)
        let mut staged = self.stage_entity(obj)?;
        self.process_children(&mut staged, type_def)?;
        Ok(Value::Object(staged))
    }
}
/// Iterates values of `obj`, if they are structural (Array/Object), executes `self.merge()` on them.
/// Uses the `Database` registry to find FK relations and apply the IDs upstream/downstream appropriately.
///
/// `obj` is mutated in place: each structural child is removed, recursively
/// merged, then re-inserted under the same key, with FK columns synced in
/// whichever direction owns them.
fn process_children(
    &self,
    obj: &mut serde_json::Map<String, Value>,
    type_def: &crate::database::r#type::Type,
) -> Result<(), String> {
    // Snapshot keys up front so entries can be removed/re-inserted mid-loop.
    let keys: Vec<String> = obj.keys().cloned().collect();
    for key in keys {
        // Temporarily extract value to process without borrowing Map mutably
        let val = match obj.remove(&key) {
            Some(v) => v,
            None => continue,
        };
        if val.is_object() || val.is_array() {
            // Pre-Process: Propagate parent data to children BEFORE recursing and applying relations
            let mut child_val = val;
            let mut relation_info = None;
            // Try to peek at the child type for relational mapping
            // (for arrays, the first element is taken as representative).
            let peek_obj = match &child_val {
                Value::Object(m) => Some(m),
                Value::Array(arr) if !arr.is_empty() => arr[0].as_object(),
                _ => None,
            };
            if let Some(child_map) = peek_obj {
                if let Ok(Some(relation)) = self.get_entity_relation(obj, type_def, child_map, &key) {
                    let child_type_name = child_map.get("type").and_then(|v| v.as_str()).unwrap_or("");
                    if let Some(c_type) = self.db.types.get(child_type_name) {
                        // Record which side of the relation owns the FK columns.
                        let parent_is_source = type_def.hierarchy.contains(&relation.source_type);
                        let child_is_source = c_type.hierarchy.contains(&relation.source_type);
                        relation_info = Some((relation, parent_is_source, child_is_source));
                    }
                }
            }
            // Apply pre-merge mutations mapping IDs
            if let Some((relation, _parent_is_source, child_is_source)) = relation_info.as_ref() {
                match &mut child_val {
                    Value::Object(child_map) => {
                        // Cascade Organization ID
                        if !child_map.contains_key("organization_id") {
                            if let Some(org_id) = obj.get("organization_id") {
                                child_map.insert("organization_id".to_string(), org_id.clone());
                            }
                        }
                        // If child owns FK, parent provides it
                        if *child_is_source {
                            Self::apply_entity_relation(
                                child_map,
                                &relation.source_columns,
                                &relation.destination_columns,
                                obj,
                            );
                        }
                    }
                    Value::Array(items) => {
                        // Same cascading rules, applied to every object element.
                        for item in items.iter_mut() {
                            if let Value::Object(child_map) = item {
                                if !child_map.contains_key("organization_id") {
                                    if let Some(org_id) = obj.get("organization_id") {
                                        child_map.insert("organization_id".to_string(), org_id.clone());
                                    }
                                }
                                if *child_is_source {
                                    Self::apply_entity_relation(
                                        child_map,
                                        &relation.source_columns,
                                        &relation.destination_columns,
                                        obj,
                                    );
                                }
                            }
                        }
                    }
                    _ => {}
                }
            }
            // RECURSE: Merge the modified children
            let merged_val = self.merge(child_val)?;
            // Post-Process: Apply relations upwards if parent owns the FK
            if let Some((relation, parent_is_source, _child_is_source)) = relation_info {
                if parent_is_source {
                    match &merged_val {
                        Value::Object(merged_child_map) => {
                            Self::apply_entity_relation(
                                obj,
                                &relation.source_columns,
                                &relation.destination_columns,
                                merged_child_map,
                            );
                        }
                        Value::Array(items) if !items.is_empty() => {
                            // NOTE(review): only the FIRST element's IDs flow
                            // upward to the parent — confirm this is intended
                            // for multi-element lists.
                            if let Value::Object(merged_child_map) = &items[0] {
                                Self::apply_entity_relation(
                                    obj,
                                    &relation.source_columns,
                                    &relation.destination_columns,
                                    merged_child_map,
                                );
                            }
                        }
                        _ => {}
                    }
                }
            }
            obj.insert(key, merged_val);
        } else {
            // Scalars are untouched — put them straight back.
            obj.insert(key, val);
        }
    }
    Ok(())
}
/// Evaluates `lk_` structures, fetches existing rows via SPI, computes `compare_entities` diff,
/// executes UPDATE/INSERT SPI, and handles `agreego.change` auditing.
///
/// Returns `obj` with system fields (`id`, audit columns) filled in.
fn stage_entity(
    &self,
    mut obj: serde_json::Map<String, Value>,
) -> Result<serde_json::Map<String, Value>, String> {
    // Caller (merge_object) has already validated that `type` exists and is
    // registered, hence the unwraps here.
    let type_name = obj
        .get("type")
        .and_then(|v| v.as_str())
        .unwrap()
        .to_string();
    let type_def = self.db.types.get(&type_name).unwrap();
    // 1. Fetch Existing Entity
    let existing_entity = self.fetch_entity(&obj, type_def)?;
    // 2. Identify System Keys we don't want to diff
    let system_keys = vec![
        "id".to_string(),
        "type".to_string(),
        "organization_id".to_string(),
        "created_by".to_string(),
        "modified_by".to_string(),
        "created_at".to_string(),
        "modified_at".to_string(),
    ];
    // 3. Compare entities to find exact changes
    let changes = self.compare_entities(
        existing_entity.as_ref(),
        &obj,
        &type_def.fields,
        &system_keys,
    );
    // 4. If no changes and an entity existed, we skip
    let is_update = existing_entity.is_some();
    if is_update && changes.is_empty() {
        return Ok(obj);
    }
    // 5. Apply correct system fields (audit columns; generate an id on create)
    let user_id = self.db.auth_user_id()?;
    let timestamp = self.db.timestamp()?;
    let entity_change_kind = if !is_update {
        if !obj.contains_key("id") {
            use uuid::Uuid;
            obj.insert("id".to_string(), Value::String(Uuid::new_v4().to_string()));
        }
        obj.insert("created_by".to_string(), Value::String(user_id.clone()));
        obj.insert("created_at".to_string(), Value::String(timestamp.clone()));
        obj.insert("modified_by".to_string(), Value::String(user_id.clone()));
        obj.insert("modified_at".to_string(), Value::String(timestamp.clone()));
        "create"
    } else {
        obj.insert("modified_by".to_string(), Value::String(user_id.clone()));
        obj.insert("modified_at".to_string(), Value::String(timestamp.clone()));
        "update"
    };
    // 6. Execute SQL Merges
    self.merge_entity_fields(is_update, &type_name, type_def, &changes, &obj)?;
    // 7. Fire agreego.change: build the complete state and the audit row.
    let mut complete = obj.clone();
    if is_update {
        // overlay on top of existing for complete state
        if let Some(mut existing) = existing_entity {
            for (k, v) in &obj {
                existing.insert(k.clone(), v.clone());
            }
            complete = existing;
        }
    }
    let mut notification = serde_json::Map::new();
    notification.insert("complete".to_string(), Value::Object(complete.clone()));
    // On create the change record carries `type`; on update it mirrors the diff.
    let changes_val = if !is_update {
        let mut c = changes.clone();
        c.insert("type".to_string(), Value::String(type_name.clone()));
        Value::Object(c)
    } else {
        notification.insert("changes".to_string(), Value::Object(changes.clone()));
        Value::Object(changes.clone())
    };
    // NOTE(review): values are inlined via `quote_literal` (defined elsewhere)
    // rather than bound parameters — confirm it escapes correctly for all inputs.
    let change_sql = format!(
        "INSERT INTO agreego.change (changes, entity_id, id, kind, modified_at, modified_by) VALUES ({}, {}, {}, {}, {}, {})",
        Self::quote_literal(&changes_val),
        Self::quote_literal(obj.get("id").unwrap()),
        Self::quote_literal(&Value::String(uuid::Uuid::new_v4().to_string())),
        Self::quote_literal(&Value::String(entity_change_kind.to_string())),
        Self::quote_literal(&Value::String(timestamp.clone())),
        Self::quote_literal(&Value::String(user_id.clone()))
    );
    let notification_json = Value::Object(notification);
    let notify_sql = format!(
        "SELECT pg_notify('entity', {})",
        Self::quote_literal(&Value::String(notification_json.to_string()))
    );
    self
        .db
        .execute(&change_sql, None)
        .map_err(|e| format!("Executor Error in change: {:?}", e))?;
    self
        .db
        .execute(&notify_sql, None)
        .map_err(|e| format!("Executor Error in notify: {:?}", e))?;
    Ok(obj)
}
/// Exact replica of `agreego.compare_entities`. Takes a fetched `old` entity from the DB (if any),
/// the `new_fields` from the JSON payload, the `fields` defined on the `Type` hierarchy, and a list of `system_keys`.
/// Returns a clean JSON object containing ONLY the modified keys, or an empty map if No-Op.
fn compare_entities(
    &self,
    fetched_entity: Option<&serde_json::Map<String, Value>>,
    new_fields: &serde_json::Map<String, Value>,
    type_fields: &[String],
    system_keys: &[String],
) -> serde_json::Map<String, Value> {
    // A field is "changed" when there is no old row at all, or when the old
    // value (missing keys count as null) differs strictly from the new one.
    let is_changed = |key: &String, new_val: &Value| -> bool {
        match fetched_entity {
            None => true,
            Some(old_map) => old_map.get(key).unwrap_or(&Value::Null) != new_val,
        }
    };

    let mut changes = serde_json::Map::new();
    for (key, new_val) in new_fields {
        // Only diff keys that belong to the Type's field schema and are not
        // strictly managed system audit keys.
        let diffable = type_fields.contains(key) && !system_keys.contains(key);
        if diffable && is_changed(key, new_val) {
            changes.insert(key.clone(), new_val.clone());
        }
    }
    changes
}
/// Exact replica of `agreego.reduce_entity_relations`. Resolves Ambiguous Graph paths
/// down to a single distinct FK relationship path based on prefix rules.
fn reduce_entity_relations(
    &self,
    mut matching_relations: Vec<crate::database::relation::Relation>,
    relative: &serde_json::Map<String, Value>,
    relation_name: &str,
) -> Result<Option<crate::database::relation::Elation>, String> {
    // Fast paths: nothing to choose from, or a single unambiguous candidate.
    match matching_relations.len() {
        0 => return Ok(None),
        1 => return Ok(matching_relations.pop()),
        _ => {}
    }

    // Step 1: prefer a relation whose configured prefix exactly matches the
    // JSON key under which the relative was found.
    let mut exact: Vec<_> = matching_relations
        .iter()
        .filter(|r| r.prefix.as_deref() == Some(relation_name))
        .cloned()
        .collect();
    if exact.len() == 1 {
        return Ok(exact.pop());
    }

    // Step 2: inverse filter — discard relations whose prefix appears as a key
    // in the relative payload (mirrors the SQL
    // `WHERE NOT EXISTS (select mr.prefix where relative ? mr.prefix)`);
    // relations without a prefix are always kept.
    matching_relations.retain(|r| match &r.prefix {
        Some(prefix) => !relative.contains_key(prefix),
        None => true,
    });

    if matching_relations.len() == 1 {
        Ok(matching_relations.pop())
    } else {
        let constraints: Vec<_> = matching_relations
            .iter()
            .map(|r| r.constraint.clone())
            .collect();
        Err(format!(
            "AMBIGUOUS_TYPE_RELATIONS: Could not reduce ambiguous type relations: {}",
            constraints.join(", ")
        ))
    }
}
/// Exact replica of `agreego.get_entity_relation`. Given two entities (`entity` and `relative`) and the JSON key linking them,
/// it searches the Database graphs for a concrete FK constraint.
fn get_entity_relation(
&self,
_entity: &serde_json::Map<String, Value>,
entity_type: &crate::database::r#type::Type,
relative: &serde_json::Map<String, Value>,
relation_name: &str,
) -> Result<Option<crate::database::relation::Relation>, String> {
let relative_type_name = relative.get("type").and_then(|v| v.as_str()).unwrap_or("");
let relative_type = match self.db.types.get(relative_type_name) {
Some(t) => t,
None => return Ok(None),
};
let mut relative_relations: Vec<crate::database::relation::Relation> = Vec::new();
// 1. Look for direct relationships first
for r in self.db.relations.values() {
if r.source_type != "entity" && r.destination_type != "entity" {
let condition1 = relative_type.hierarchy.contains(&r.source_type)
&& entity_type.hierarchy.contains(&r.destination_type);
let condition2 = entity_type.hierarchy.contains(&r.source_type)
&& relative_type.hierarchy.contains(&r.destination_type);
if condition1 || condition2 {
relative_relations.push(r.clone());
}
}
}
let mut relative_relation =
self.reduce_entity_relations(relative_relations, relative, relation_name)?;
// 2. Look for polymorphic relationships if no direct relationship is found
if relative_relation.is_none() {
let mut poly_relations: Vec<crate::database::relation::Relation> = Vec::new();
for r in self.db.relations.values() {
if r.destination_type == "entity" {
let condition1 = relative_type.hierarchy.contains(&r.source_type);
let condition2 = entity_type.hierarchy.contains(&r.source_type);
if condition1 || condition2 {
poly_relations.push(r.clone());
}
}
}
relative_relation = self.reduce_entity_relations(poly_relations, relative, relation_name)?;
}
Ok(relative_relation)
}
/// Exact replica of `agreego.apply_entity_relation`. Syncs FK column values from the destination to the source.
///
/// For each `(source, destination)` column pair, copies the destination
/// entity's value into the source entity, defaulting to JSON null when the
/// destination has no such key.
fn apply_entity_relation(
    source_entity: &mut serde_json::Map<String, Value>,
    source_columns: &[String],
    destination_columns: &[String],
    destination_entity: &serde_json::Map<String, Value>,
) {
    // In theory, validation should prevent this, but fail gracefully/ignore if lengths diverge.
    if source_columns.len() != destination_columns.len() {
        return;
    }
    // `zip` pairs the parallel slices directly — no per-iteration indexing.
    for (src_col, dest_col) in source_columns.iter().zip(destination_columns) {
        let dest_val = destination_entity
            .get(dest_col)
            .cloned()
            .unwrap_or(Value::Null);
        source_entity.insert(src_col.clone(), dest_val);
    }
}
/// Exact replica of `agreego.fetch_entity`. Dynamically constructs a `SELECT to_jsonb(t1.*) || to_jsonb(t2.*)`
/// based on the Type hierarchy and available `id` or `lookup_fields` presence.
///
/// Returns `Ok(None)` when there is nothing to look up (no `id` and incomplete
/// lookup keys) or when no row matches; errors when the lookup matches more
/// than one existing row.
fn fetch_entity(
    &self,
    entity_fields: &serde_json::Map<String, Value>,
    entity_type: &crate::database::r#type::Type,
) -> Result<Option<serde_json::Map<String, Value>>, String> {
    let id_val = entity_fields.get("id");
    let entity_type_name = entity_type.name.as_str();
    // Check if all required lookup keys are PRESENT (value can be anything, including NULL)
    let lookup_complete = if entity_type.lookup_fields.is_empty() {
        false
    } else {
        entity_type
            .lookup_fields
            .iter()
            .all(|f| entity_fields.contains_key(f))
    };
    if id_val.is_none() && !lookup_complete {
        return Ok(None);
    }
    // Build or Retrieve Cached Select/Join clauses.
    // The template is keyed by type name only and contains no per-call data;
    // the WHERE clause is appended fresh on every call below.
    let fetch_sql_template = if let Some(cached) = self.cache.get(entity_type_name) {
        cached
    } else {
        // t1 is the root table of the hierarchy; every further hierarchy table
        // is LEFT JOINed on the shared `id` and merged into one jsonb object.
        let mut select_list = String::from("to_jsonb(t1.*)");
        let mut join_clauses = format!("FROM agreego.\"{}\" t1", entity_type.hierarchy[0]);
        for (i, table_name) in entity_type.hierarchy.iter().enumerate().skip(1) {
            let t_alias = format!("t{}", i + 1);
            join_clauses.push_str(&format!(
                " LEFT JOIN agreego.\"{}\" {} ON {}.id = t1.id",
                table_name, t_alias, t_alias
            ));
            select_list.push_str(&format!(" || to_jsonb({}.*)", t_alias));
        }
        let template = format!("SELECT {} {}", select_list, join_clauses);
        self
            .cache
            .insert(entity_type_name.to_string(), template.clone());
        template
    };
    // Build WHERE Clauses
    let mut id_condition = None;
    if let Some(id) = id_val {
        id_condition = Some(format!("t1.id = {}", Self::quote_literal(id)));
    }
    let mut lookup_condition = None;
    if lookup_complete {
        let mut lookup_predicates = Vec::new();
        for column in &entity_type.lookup_fields {
            let val = entity_fields.get(column).unwrap_or(&Value::Null);
            if column == "type" {
                // `type` is always compared as an exact value, never as IS NULL.
                lookup_predicates.push(format!("t1.\"{}\" = {}", column, Self::quote_literal(val)));
            } else {
                // Empty string and JSON null are both treated as SQL NULL.
                if val.as_str() == Some("") || val.is_null() {
                    lookup_predicates.push(format!("\"{}\" IS NULL", column));
                } else {
                    lookup_predicates.push(format!("\"{}\" = {}", column, Self::quote_literal(val)));
                }
            }
        }
        lookup_condition = Some(lookup_predicates.join(" AND "));
    }
    // Determine final WHERE clause based on available conditions:
    // id and lookup match are OR'd so either identification path succeeds.
    let where_clause = match (id_condition, lookup_condition) {
        (Some(id_cond), Some(lookup_cond)) => format!("WHERE ({}) OR ({})", id_cond, lookup_cond),
        (Some(id_cond), None) => format!("WHERE {}", id_cond),
        (None, Some(lookup_cond)) => format!("WHERE {}", lookup_cond),
        (None, None) => return Ok(None),
    };
    // Construct Final Query
    let fetch_sql = format!("{} {}", fetch_sql_template, where_clause);
    // Execute and Return Result via Database Executor
    let fetched = match self.db.query(&fetch_sql, None) {
        Ok(Value::Array(table)) => {
            if table.len() > 1 {
                // More than one candidate row is ambiguous — refuse to pick one.
                Err(format!(
                    "TOO_MANY_LOOKUP_ROWS: Lookup for {} found too many existing rows",
                    entity_type_name
                ))
            } else if table.is_empty() {
                Ok(None)
            } else {
                let row = table.first().unwrap();
                match row {
                    Value::Object(map) => Ok(Some(map.clone())),
                    other => Err(format!(
                        "Expected fetch_entity to return JSON object, got: {:?}",
                        other
                    )),
                }
            }
        }
        Ok(other) => Err(format!(
            "Expected array from query in fetch_entity, got: {:?}",
            other
        )),
        Err(e) => Err(format!("SPI error in fetch_entity: {:?}", e)),
    }?;
    Ok(fetched)
}
/// Exact replica of `agreego.merge_entity_fields`. Issues an INSERT or UPDATE per table
/// in the Type's hierarchy, filtering out keys that don't belong to the specific table block.
///
/// `changes` carries only the delta for UPDATEs; `full_entity` carries the
/// complete field set used for INSERTs (and must contain a string `id`).
fn merge_entity_fields(
    &self,
    is_update: bool,
    entity_type_name: &str,
    entity_type: &crate::database::r#type::Type,
    changes: &serde_json::Map<String, Value>,
    full_entity: &serde_json::Map<String, Value>,
) -> Result<(), String> {
    let id_str = match full_entity.get("id").and_then(|v| v.as_str()) {
        Some(id) => id,
        None => return Err("Missing 'id' for merge execution".to_string()),
    };
    // grouped_fields maps each hierarchy table name to the columns it owns,
    // e.g. { "person": ["first_name", ...], "user": ["password"], ... }.
    let grouped_fields = match &entity_type.grouped_fields {
        Some(Value::Object(map)) => map,
        _ => {
            return Err(format!(
                "Grouped fields missing for type {}",
                entity_type_name
            ));
        }
    };
    for table_name in &entity_type.hierarchy {
        // get the fields for this specific table (from grouped_fields)
        let table_fields = match grouped_fields.get(table_name).and_then(|v| v.as_array()) {
            Some(arr) => arr
                .iter()
                .filter_map(|v| v.as_str().map(|s| s.to_string()))
                .collect::<Vec<_>>(),
            None => continue,
        };
        let mut my_changes = Vec::new();
        for field in &table_fields {
            if changes.contains_key(field) || (!is_update && full_entity.contains_key(field)) {
                // For inserts we want all provided fields. For updates we only want changes.
                my_changes.push(field.clone());
            }
        }
        if is_update {
            // Skip the table entirely when nothing it owns changed.
            if my_changes.is_empty() {
                continue;
            }
            let mut set_clauses = Vec::new();
            for field in &my_changes {
                // Safe unwrap: for updates, my_changes only contains keys present in `changes`.
                let val = changes.get(field).unwrap();
                set_clauses.push(format!("\"{}\" = {}", field, Self::quote_literal(val)));
            }
            let sql = format!(
                "UPDATE agreego.\"{}\" SET {} WHERE id = {}",
                table_name,
                set_clauses.join(", "),
                Self::quote_literal(&Value::String(id_str.to_string()))
            );
            self
                .db
                .execute(&sql, None)
                .map_err(|e| format!("SPI Error in UPDATE: {:?}", e))?;
        } else {
            // INSERT
            let mut columns = Vec::new();
            let mut values = Vec::new();
            for field in &my_changes {
                columns.push(format!("\"{}\"", field));
                let val = full_entity.get(field).unwrap();
                values.push(Self::quote_literal(val));
            }
            // Ensure 'id' and 'type' are present if required by this specific table schema chunk
            if !columns.contains(&"\"id\"".to_string()) && table_fields.contains(&"id".to_string()) {
                columns.push("\"id\"".to_string());
                values.push(Self::quote_literal(&Value::String(id_str.to_string())));
            }
            if !columns.contains(&"\"type\"".to_string()) && table_fields.contains(&"type".to_string())
            {
                columns.push("\"type\"".to_string());
                values.push(Self::quote_literal(&Value::String(
                    entity_type_name.to_string(),
                )));
            }
            // Nothing to insert for this table (no owned fields supplied).
            if columns.is_empty() {
                continue;
            }
            let sql = format!(
                "INSERT INTO agreego.\"{}\" ({}) VALUES ({})",
                table_name,
                columns.join(", "),
                values.join(", ")
            );
            self
                .db
                .execute(&sql, None)
                .map_err(|e| format!("SPI Error in INSERT: {:?}", e))?;
        }
    }
    Ok(())
}
/// Helper to emulate Postgres `quote_literal`
///
/// Strings escape embedded single quotes by doubling them; non-scalar JSON
/// values are serialized and quoted the same way; NULL, booleans, and numbers
/// render bare (no quoting needed).
fn quote_literal(val: &Value) -> String {
    match val {
        Value::Null => String::from("NULL"),
        Value::Bool(b) => b.to_string(),
        Value::Number(n) => n.to_string(),
        Value::String(s) => format!("'{}'", s.replace('\'', "''")),
        other => {
            let serialized = serde_json::to_string(other).unwrap();
            format!("'{}'", serialized.replace('\'', "''"))
        }
    }
} }
} }

369
src/queryer/compiler.rs Normal file
View File

@ -0,0 +1,369 @@
use crate::database::Database;
use std::sync::Arc;
/// Compiles JSON schemas into nested PostgreSQL JSONB queries.
pub struct SqlCompiler {
    // Shared database model (schemas, types, grouped fields) consulted during compilation.
    pub db: Arc<Database>,
}
impl SqlCompiler {
pub fn new(db: Arc<Database>) -> Self {
Self { db }
}
/// Compiles a JSON schema into a nested PostgreSQL query returning JSONB
///
/// `stem_path` (when present, non-empty, and not "/") narrows compilation to a
/// sub-schema; `filter_keys` become positional `$n` parameters in the SQL.
pub fn compile(
    &self,
    schema_id: &str,
    stem_path: Option<&str>,
    filter_keys: &[String],
) -> Result<String, String> {
    let schema = match self.db.schemas.get(schema_id) {
        Some(s) => s,
        None => return Err(format!("Schema not found: {}", schema_id)),
    };
    // A missing, empty, or root ("/") stem means: compile the whole schema.
    let target_schema = match stem_path {
        Some(path) if !path.is_empty() && path != "/" => self.resolve_stem(schema, path)?,
        _ => schema,
    };
    // 1. We expect the top level to typically be an Object or Array
    let (sql, _field_type) = self.walk_schema(target_schema, "t1", None, filter_keys)?;
    Ok(sql)
}
/// Walks `path` (e.g. "/a/b") through nested schema properties, transparently
/// resolving `$ref` indirection (and array `items` wrappers) at every step.
///
/// Returns the fully dereferenced sub-schema, or an error naming the path part
/// that could not be resolved.
fn resolve_stem<'a>(
    &'a self,
    mut schema: &'a crate::database::schema::Schema,
    path: &str,
) -> Result<&'a crate::database::schema::Schema, String> {
    // Follows a `$ref` chain to its concrete schema, bounded to 20 hops so a
    // cyclic or dangling reference cannot loop forever. (Previously this loop
    // was copy-pasted three times in this function.)
    let deref_chain = |start: &'a crate::database::schema::Schema| {
        let mut current = start;
        let mut depth = 0;
        while let Some(r) = &current.obj.r#ref {
            if let Some(s) = self.db.schemas.get(r) {
                current = s;
            } else {
                break;
            }
            depth += 1;
            if depth > 20 {
                break;
            }
        }
        current
    };
    for part in path.trim_start_matches('/').split('/') {
        let mut current = deref_chain(schema);
        // An array wrapper with no properties of its own: descend into its item schema.
        if current.obj.properties.is_none() && current.obj.items.is_some() {
            if let Some(items) = &current.obj.items {
                current = deref_chain(items);
            }
        }
        if let Some(props) = &current.obj.properties {
            if let Some(next_schema) = props.get(part) {
                schema = next_schema;
            } else {
                return Err(format!("Stem part '{}' not found in schema", part));
            }
        } else {
            return Err(format!(
                "Cannot resolve stem part '{}': not an object",
                part
            ));
        }
    }
    // Resolve any trailing `$ref` so callers always receive a concrete schema.
    Ok(deref_chain(schema))
}
/// Recursively walks the schema AST emitting native PostgreSQL jsonb mapping
/// Returns a tuple of (SQL_String, Field_Type)
///
/// Dispatch order: arrays first (entity-ref items compile to an aggregated
/// entity subquery), then direct `$ref`s (entity type vs. ad-hoc schema),
/// then inline objects, and finally a bare column reference for scalar leaves.
fn walk_schema(
    &self,
    schema: &crate::database::schema::Schema,
    parent_alias: &str,
    prop_name_context: Option<&str>,
    filter_keys: &[String],
) -> Result<(String, String), String> {
    // Determine the base schema type (could be an array, object, or literal)
    match &schema.obj.type_ {
        Some(crate::database::schema::SchemaTypeOrArray::Single(t)) if t == "array" => {
            // Handle Arrays:
            if let Some(items) = &schema.obj.items {
                if let Some(ref_id) = &items.obj.r#ref {
                    if let Some(type_def) = self.db.types.get(ref_id) {
                        // Array of entities: emit an aggregated entity subquery.
                        return self.compile_entity_node(
                            items,
                            type_def,
                            parent_alias,
                            prop_name_context,
                            true,
                            filter_keys,
                        );
                    }
                }
                // Array of non-entity items — FROM clause is still a placeholder (TODO).
                let (item_sql, _) =
                    self.walk_schema(items, parent_alias, prop_name_context, filter_keys)?;
                return Ok((
                    format!("(SELECT jsonb_agg({}) FROM TODO)", item_sql),
                    "array".to_string(),
                ));
            }
            // Array with no `items` schema at all — placeholder SQL (TODO).
            Ok((
                "SELECT jsonb_agg(TODO) FROM TODO".to_string(),
                "array".to_string(),
            ))
        }
        _ => {
            // Handle Objects & Direct Refs
            if let Some(ref_id) = &schema.obj.r#ref {
                // If it's a $ref, check if it points to an Entity Type
                if let Some(type_def) = self.db.types.get(ref_id) {
                    return self.compile_entity_node(
                        schema,
                        type_def,
                        parent_alias,
                        prop_name_context,
                        false,
                        filter_keys,
                    );
                }
                // If it's just an ad-hoc struct ref, we should resolve it
                if let Some(target_schema) = self.db.schemas.get(ref_id) {
                    return self.walk_schema(target_schema, parent_alias, prop_name_context, filter_keys);
                }
                return Err(format!("Unresolved $ref: {}", ref_id));
            }
            // Just an inline object definition?
            if let Some(props) = &schema.obj.properties {
                return self.compile_inline_object(props, parent_alias, filter_keys);
            }
            // Literal fallback: a scalar leaf references its owning table's column directly.
            Ok((
                format!(
                    "{}.{}",
                    parent_alias,
                    prop_name_context.unwrap_or("unknown_prop")
                ),
                "string".to_string(),
            ))
        }
    }
}
/// Compiles a query block for an Entity-typed schema node.
///
/// Builds a correlated subquery that joins every table in the Type's hierarchy,
/// maps each schema property to the table alias that owns it (via
/// `grouped_fields`), and wraps the result in `jsonb_build_object` — optionally
/// aggregated with `jsonb_agg` when `is_array` is set.
fn compile_entity_node(
    &self,
    schema: &crate::database::schema::Schema,
    type_def: &crate::database::r#type::Type,
    parent_alias: &str,
    prop_name: Option<&str>,
    is_array: bool,
    filter_keys: &[String],
) -> Result<(String, String), String> {
    // We are compiling a query block for an Entity.
    let mut select_args = Vec::new();
    // Mapping table hierarchy to aliases, e.g., ["person", "user", "organization", "entity"]
    let local_ctx = format!("{}_{}", parent_alias, prop_name.unwrap_or("obj"));
    // e.g., parent_t1_contact -> we'll use t1 for the first of this block, t2 for the second, etc.
    // Actually, local_ctx can just be exactly that prop's unique path.
    let mut table_aliases = std::collections::HashMap::new();
    let mut from_clauses = Vec::new();
    for (i, table_name) in type_def.hierarchy.iter().enumerate() {
        let alias = format!("{}_t{}", local_ctx, i + 1);
        table_aliases.insert(table_name.clone(), alias.clone());
        if i == 0 {
            from_clauses.push(format!("agreego.{} {}", table_name, alias));
        } else {
            // Join to previous hierarchy table on the shared id.
            let prev_alias = format!("{}_t{}", local_ctx, i);
            from_clauses.push(format!(
                "JOIN agreego.{} {} ON {}.id = {}.id",
                table_name, alias, alias, prev_alias
            ));
        }
    }
    // Now, let's map properties from the schema to the correct table alias using grouped_fields
    // grouped_fields is { "person": ["first_name", ...], "user": ["password"], ... }
    let grouped_fields = type_def.grouped_fields.as_ref().and_then(|v| v.as_object());
    if let Some(props) = &schema.obj.properties {
        for (prop_key, prop_schema) in props {
            // Find which table owns this property; default to the "entity" table's alias.
            let mut owner_alias = table_aliases
                .get("entity")
                .cloned()
                .unwrap_or_else(|| format!("{}_t_err", parent_alias));
            if let Some(gf) = grouped_fields {
                for (t_name, fields_val) in gf {
                    if let Some(fields_arr) = fields_val.as_array() {
                        if fields_arr.iter().any(|v| v.as_str() == Some(prop_key)) {
                            owner_alias = table_aliases
                                .get(t_name)
                                .cloned()
                                .unwrap_or_else(|| parent_alias.to_string());
                            break;
                        }
                    }
                }
            }
            // Now we know `owner_alias`, e.g., `parent_t1` or `parent_t3`.
            // Walk the property to get its SQL value
            let (val_sql, _) =
                self.walk_schema(prop_schema, &owner_alias, Some(prop_key), filter_keys)?;
            select_args.push(format!("'{}', {}", prop_key, val_sql));
        }
    }
    let jsonb_obj_sql = if select_args.is_empty() {
        "jsonb_build_object()".to_string()
    } else {
        format!("jsonb_build_object({})", select_args.join(", "))
    };
    // The alias of the table named after the Type itself (the entity's "home" table).
    let base_alias = table_aliases
        .get(&type_def.name)
        .cloned()
        .unwrap_or_else(|| "err".to_string());
    let mut where_clauses = Vec::new();
    // Soft-delete guard: archived rows are never returned.
    where_clauses.push(format!("NOT {}.archived", base_alias));
    // Filter Mapping - Only append filters if this is the ROOT table query (i.e. parent_alias is "t1")
    // Because cue.filters operates strictly on top-level root properties right now.
    if parent_alias == "t1" && prop_name.is_none() {
        for (i, filter_key) in filter_keys.iter().enumerate() {
            // Find which table owns this filter key
            let mut filter_alias = base_alias.clone(); // default to root table (e.g. t3 entity)
            if let Some(gf) = type_def.grouped_fields.as_ref().and_then(|v| v.as_object()) {
                for (t_name, fields_val) in gf {
                    if let Some(fields_arr) = fields_val.as_array() {
                        if fields_arr.iter().any(|v| v.as_str() == Some(filter_key)) {
                            filter_alias = table_aliases
                                .get(t_name)
                                .cloned()
                                .unwrap_or_else(|| base_alias.clone());
                            break;
                        }
                    }
                }
            }
            let mut is_ilike = false;
            let mut cast = "";
            // Check schema for filter_key to determine datatype operation:
            // plain strings get ILIKE; uuid/date-time/boolean/numeric get a cast + equality.
            if let Some(props) = &schema.obj.properties {
                if let Some(ps) = props.get(filter_key) {
                    let is_enum = ps.obj.enum_.is_some();
                    if let Some(crate::database::schema::SchemaTypeOrArray::Single(t)) = &ps.obj.type_ {
                        if t == "string" {
                            if ps.obj.format.as_deref() == Some("uuid") {
                                cast = "::uuid";
                            } else if ps.obj.format.as_deref() == Some("date-time") {
                                cast = "::timestamptz";
                            } else if !is_enum {
                                is_ilike = true;
                            }
                        } else if t == "boolean" {
                            cast = "::boolean";
                        } else if t == "integer" || t == "number" {
                            cast = "::numeric";
                        }
                    }
                }
            }
            // Add to WHERE clause using 1-indexed args pointer: $1, $2.
            // `#>>'{}'` unwraps the jsonb parameter to text before casting/comparing.
            if is_ilike {
                let param = format!("${}#>>'{{}}'", i + 1);
                where_clauses.push(format!("{}.{} ILIKE {}", filter_alias, filter_key, param));
            } else {
                let param = format!("(${}#>>'{{}}'){}", i + 1, cast);
                where_clauses.push(format!("{}.{} = {}", filter_alias, filter_key, param));
            }
        }
    }
    // Resolve FK relationship constraint if this is a nested subquery
    if let Some(_prop) = prop_name {
        // MOCK relation resolution (will integrate with `get_entity_relation` properly)
        // By default assume FK is parent_id on child
        where_clauses.push(format!("{}.parent_id = {}.id", base_alias, parent_alias));
    }
    // Wrap the object in the final array or object SELECT
    let selection = if is_array {
        format!("COALESCE(jsonb_agg({}), '[]'::jsonb)", jsonb_obj_sql)
    } else {
        jsonb_obj_sql
    };
    let full_sql = format!(
        "(SELECT {} FROM {} WHERE {})",
        selection,
        from_clauses.join(" "),
        where_clauses.join(" AND ")
    );
    Ok((
        full_sql,
        if is_array {
            "array".to_string()
        } else {
            "object".to_string()
        },
    ))
}
/// Emits a `jsonb_build_object(...)` expression for an inline (non-entity) object schema.
fn compile_inline_object(
    &self,
    props: &std::collections::BTreeMap<String, std::sync::Arc<crate::database::schema::Schema>>,
    parent_alias: &str,
    filter_keys: &[String],
) -> Result<(String, String), String> {
    // Each property compiles to a `'key', <value-sql>` argument pair; the first
    // failing property short-circuits the whole object (BTreeMap keeps key order).
    let build_args = props
        .iter()
        .map(|(key, child_schema)| {
            self.walk_schema(child_schema, parent_alias, Some(key), filter_keys)
                .map(|(child_sql, _)| format!("'{}', {}", key, child_sql))
        })
        .collect::<Result<Vec<_>, String>>()?;
    Ok((
        format!("jsonb_build_object({})", build_args.join(", ")),
        "object".to_string(),
    ))
}
}

View File

@ -1,15 +1,83 @@
pub struct Queryer { use crate::database::Database;
// To be implemented use std::sync::Arc;
}
impl Default for Queryer { pub mod compiler;
fn default() -> Self {
Self::new() use dashmap::DashMap;
}
pub struct Queryer {
pub db: Arc<Database>,
cache: DashMap<String, String>,
} }
impl Queryer { impl Queryer {
pub fn new() -> Self { pub fn new(db: Arc<Database>) -> Self {
Self {} Self {
db,
cache: DashMap::new(),
}
}
/// Entrypoint to execute a dynamically compiled query based on a schema
pub fn query(
&self,
schema_id: &str,
stem_opt: Option<&str>,
filters: Option<&serde_json::Value>,
) -> Result<serde_json::Value, String> {
let filters_map: Option<&serde_json::Map<String, serde_json::Value>> =
filters.and_then(|f| f.as_object());
// Generate Permutation Cache Key: schema_id + sorted filter keys
let mut filter_keys: Vec<String> = Vec::new();
if let Some(fm) = filters_map {
for key in fm.keys() {
filter_keys.push(key.clone());
}
}
filter_keys.sort();
let stem_key = stem_opt.unwrap_or("/");
let cache_key = format!("{}(Stem:{}):{}", schema_id, stem_key, filter_keys.join(","));
let sql = if let Some(cached_sql) = self.cache.get(&cache_key) {
cached_sql.value().clone()
} else {
// Compile the massive base SQL string
let compiler = compiler::SqlCompiler::new(self.db.clone());
let compiled_sql = compiler.compile(schema_id, stem_opt, &filter_keys)?;
self.cache.insert(cache_key.clone(), compiled_sql.clone());
compiled_sql
};
// 2. Prepare the execution arguments from the filters
let mut args: Vec<serde_json::Value> = Vec::new();
if let Some(fm) = filters_map {
for (_i, key) in filter_keys.iter().enumerate() {
if let Some(val) = fm.get(key) {
args.push(val.clone());
}
}
}
// 3. Execute via Database Executor
let fetched = match self.db.query(&sql, Some(&args)) {
Ok(serde_json::Value::Array(table)) => {
if table.is_empty() {
Ok(serde_json::Value::Null)
} else {
// We expect the query to return a single JSONB column, already unpacked from row[0]
Ok(table.first().unwrap().clone())
}
}
Ok(other) => Err(format!(
"Expected array from generic query, got: {:?}",
other
)),
Err(e) => Err(format!("SPI error in queryer: {}", e)),
}?;
Ok(fetched)
} }
} }

View File

@ -10,12 +10,48 @@ struct TestSuite {
} }
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
struct TestCase { pub struct TestCase {
description: String, pub description: String,
data: serde_json::Value,
valid: bool, #[serde(default = "default_action")]
// Support explicit schema ID target for test case pub action: String, // "validate", "merge", or "query"
schema_id: String,
// For Validate & Query
#[serde(default)]
pub schema_id: String,
// For Query
#[serde(default)]
pub stem: Option<String>,
#[serde(default)]
pub filters: Option<serde_json::Value>,
// For Merge & Validate
#[serde(default)]
pub data: Option<serde_json::Value>,
// For Merge & Query mocks
#[serde(default)]
pub mocks: Option<serde_json::Value>,
pub expect: Option<ExpectBlock>,
// Legacy support for older tests to avoid migrating them all instantly
pub valid: Option<bool>,
pub expect_errors: Option<Vec<serde_json::Value>>,
}
/// Serde default for `TestCase::action`: cases without an explicit action are validations.
fn default_action() -> String {
    String::from("validate")
}
#[derive(Debug, Deserialize)]
pub struct ExpectBlock {
pub success: bool,
pub result: Option<serde_json::Value>,
pub errors: Option<Vec<serde_json::Value>>,
pub sql_patterns: Option<Vec<String>>,
} }
// use crate::validator::registry::REGISTRY; // No longer used directly for tests! // use crate::validator::registry::REGISTRY; // No longer used directly for tests!

View File

@ -0,0 +1,43 @@
use jspg::database::executor::DatabaseExecutor;
use serde_json::Value;
use std::sync::Mutex;
/// Scripted stand-in for the real database executor, used by unit tests.
pub struct MockExecutor {
    // FIFO queue of canned results returned by `query`; an empty queue yields an empty JSON array.
    pub query_responses: Mutex<Vec<Result<Value, String>>>,
    // FIFO queue of canned results returned by `execute`; an empty queue yields Ok(()).
    pub execute_responses: Mutex<Vec<Result<(), String>>>,
}
impl MockExecutor {
pub fn new() -> Self {
Self {
query_responses: Mutex::new(Vec::new()),
execute_responses: Mutex::new(Vec::new()),
}
}
}
impl DatabaseExecutor for MockExecutor {
    /// Pops the next scripted query result; an exhausted queue yields an empty JSON array.
    fn query(&self, _sql: &str, _args: Option<&[Value]>) -> Result<Value, String> {
        let mut queue = self.query_responses.lock().unwrap();
        if queue.is_empty() {
            Ok(Value::Array(vec![]))
        } else {
            queue.remove(0)
        }
    }
    /// Pops the next scripted execute result; an exhausted queue yields Ok(()).
    fn execute(&self, _sql: &str, _args: Option<&[Value]>) -> Result<(), String> {
        let mut queue = self.execute_responses.lock().unwrap();
        if queue.is_empty() {
            Ok(())
        } else {
            queue.remove(0)
        }
    }
    /// Fixed authenticated-user id so tests are deterministic.
    fn auth_user_id(&self) -> Result<String, String> {
        Ok("00000000-0000-0000-0000-000000000000".to_string())
    }
    /// Fixed timestamp so tests are deterministic.
    fn timestamp(&self) -> Result<String, String> {
        Ok("2026-03-10T00:00:00Z".to_string())
    }
}

1
tests/database/mod.rs Normal file
View File

@ -0,0 +1 @@
pub mod executor;

View File

@ -2,20 +2,21 @@ use ::jspg::*;
use pgrx::JsonB; use pgrx::JsonB;
use serde_json::json; use serde_json::json;
pub mod database;
#[test] #[test]
fn test_library_api() { fn test_library_api() {
// 1. Initially, schemas are not cached. // 1. Initially, schemas are not cached.
assert!(!json_schema_cached("test_schema"));
// Expected uninitialized drop format: errors + null response // Expected uninitialized drop format: errors + null response
let uninitialized_drop = validate_json_schema("test_schema", JsonB(json!({}))); let uninitialized_drop = jspg_validate("test_schema", JsonB(json!({})));
assert_eq!( assert_eq!(
uninitialized_drop.0, uninitialized_drop.0,
json!({ json!({
"type": "drop", "type": "drop",
"errors": [{ "errors": [{
"code": "VALIDATOR_NOT_INITIALIZED", "code": "VALIDATOR_NOT_INITIALIZED",
"message": "JSON Schemas have not been cached yet. Run cache_json_schemas()", "message": "The JSPG database has not been cached yet. Run jspg_setup()",
"details": { "path": "" } "details": { "path": "" }
}] }]
}) })
@ -25,6 +26,7 @@ fn test_library_api() {
let db_json = json!({ let db_json = json!({
"puncs": [], "puncs": [],
"enums": [], "enums": [],
"relations": [],
"types": [{ "types": [{
"schemas": [{ "schemas": [{
"$id": "test_schema", "$id": "test_schema",
@ -37,7 +39,7 @@ fn test_library_api() {
}] }]
}); });
let cache_drop = jspg_cache_database(JsonB(db_json)); let cache_drop = jspg_setup(JsonB(db_json));
assert_eq!( assert_eq!(
cache_drop.0, cache_drop.0,
json!({ json!({
@ -46,20 +48,8 @@ fn test_library_api() {
}) })
); );
// 3. Check schemas are cached
assert!(json_schema_cached("test_schema"));
let show_drop = show_json_schemas();
assert_eq!(
show_drop.0,
json!({
"type": "drop",
"response": ["test_schema"]
})
);
// 4. Validate Happy Path // 4. Validate Happy Path
let happy_drop = validate_json_schema("test_schema", JsonB(json!({"name": "Neo"}))); let happy_drop = jspg_validate("test_schema", JsonB(json!({"name": "Neo"})));
assert_eq!( assert_eq!(
happy_drop.0, happy_drop.0,
json!({ json!({
@ -69,7 +59,7 @@ fn test_library_api() {
); );
// 5. Validate Unhappy Path // 5. Validate Unhappy Path
let unhappy_drop = validate_json_schema("test_schema", JsonB(json!({"wrong": "data"}))); let unhappy_drop = jspg_validate("test_schema", JsonB(json!({"wrong": "data"})));
assert_eq!( assert_eq!(
unhappy_drop.0, unhappy_drop.0,
json!({ json!({
@ -90,7 +80,7 @@ fn test_library_api() {
); );
// 6. Clear Schemas // 6. Clear Schemas
let clear_drop = clear_json_schemas(); let clear_drop = jspg_teardown();
assert_eq!( assert_eq!(
clear_drop.0, clear_drop.0,
json!({ json!({
@ -98,5 +88,4 @@ fn test_library_api() {
"response": "success" "response": "success"
}) })
); );
assert!(!json_schema_cached("test_schema"));
} }