Compare commits

...

13 Commits

SHA1 Message Date
3b18901bda version: 1.0.20 2025-04-21 17:11:30 -04:00
b8c0e08068 more filtering 2025-04-21 17:11:24 -04:00
c734983a59 version: 1.0.19 2025-04-21 16:15:08 -04:00
9b11f661bc fixed release bug 2025-04-21 16:15:02 -04:00
f3a733626e version: 1.0.18 2025-04-21 16:13:16 -04:00
2bcdb8adbb version: 1.0.17 2025-04-21 16:11:31 -04:00
3988308965 branch error filtering 2025-04-21 16:11:12 -04:00
b7f528d1f6 flow 2025-04-16 21:14:07 -04:00
2febb292dc flow update 2025-04-16 20:00:35 -04:00
d1831a28ec flow update 2025-04-16 19:34:09 -04:00
c5834ac544 flow updated 2025-04-16 18:07:41 -04:00
eb25f8489e version: 1.0.16 2025-04-16 14:43:07 -04:00
21937db8de improved compile schema error messages 2025-04-16 14:42:57 -04:00
6 changed files with 155 additions and 103 deletions

.env (4 lines changed)

@@ -1,7 +1,7 @@
 ENVIRONMENT=local
-DATABASE_PASSWORD=QgSvstSjoc6fKphMzNgT3SliNY10eSRS
+DATABASE_PASSWORD=tIr4TJ0qUwGVM0rlQSe3W7Tgpi33zPbk
 DATABASE_ROLE=agreego_admin
-DATABASE_HOST=127.1.27.9
+DATABASE_HOST=127.1.27.4
 DATABASE_PORT=5432
 POSTGRES_PASSWORD=xzIq5JT0xY3F+2m1GtnrKDdK29sNSXVVYZHPKJVh8pI=
 DATABASE_NAME=agreego
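A dotenv-style file like this is not read by the shell automatically; a minimal way to export its values before running local commands, assuming plain KEY=VALUE lines as above:

set -a          # auto-export every variable defined while this is on
source .env     # defines ENVIRONMENT, DATABASE_*, POSTGRES_PASSWORD
set +a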

flow (106 lines changed)

@@ -9,7 +9,7 @@ source ./flows/rust
 # Vars
 POSTGRES_VERSION="17"
 POSTGRES_CONFIG_PATH="/opt/homebrew/opt/postgresql@${POSTGRES_VERSION}/bin/pg_config"
-DEPENDENCIES=(cargo git icu4c pkg-config "postgresql@${POSTGRES_VERSION}")
+DEPENDENCIES+=(icu4c pkg-config "postgresql@${POSTGRES_VERSION}")
 CARGO_DEPENDENCIES=(cargo-pgrx==0.14.0)
 GITEA_ORGANIZATION="cellular"
 GITEA_REPOSITORY="jspg"
@@ -20,133 +20,125 @@ env() {
         # If not set, try to get it from kubectl
         GITEA_TOKEN=$(kubectl get secret -n cellular gitea-git -o jsonpath='{.data.token}' | base64 --decode)
         if [ -z "$GITEA_TOKEN" ]; then
-            echo -e "❌ ${RED}GITEA_TOKEN is not set and couldn't be retrieved from kubectl${RESET}" >&2
-            exit 1
+            error "GITEA_TOKEN is not set and couldn't be retrieved from kubectl" >&2
+            return 2
         fi
         export GITEA_TOKEN
     fi
-    echo -e "💰 ${GREEN}Environment variables set${RESET}"
+    success "Environment variables set"
 }

 pgrx-prepare() {
-    echo -e "${BLUE}Initializing pgrx...${RESET}"
+    info "Initializing pgrx..."
     # Explicitly point to the postgresql@${POSTGRES_VERSION} pg_config, don't rely on 'which'
     local POSTGRES_CONFIG_PATH="/opt/homebrew/opt/postgresql@${POSTGRES_VERSION}/bin/pg_config"
     if [ ! -x "$POSTGRES_CONFIG_PATH" ]; then
-        echo -e "${RED}Error: pg_config not found or not executable at $POSTGRES_CONFIG_PATH.${RESET}"
-        echo -e "${YELLOW}Ensure postgresql@${POSTGRES_VERSION} is installed correctly via Homebrew.${RESET}"
-        exit 1
+        error "pg_config not found or not executable at $POSTGRES_CONFIG_PATH."
+        warning "Ensure postgresql@${POSTGRES_VERSION} is installed correctly via Homebrew."
+        return 2
     fi
     if cargo pgrx init --pg"$POSTGRES_VERSION"="$POSTGRES_CONFIG_PATH"; then
-        echo -e "${GREEN}pgrx initialized successfully.${RESET}"
+        success "pgrx initialized successfully."
     else
-        echo -e "${RED}Failed to initialize pgrx. Check PostgreSQL development packages are installed and $POSTGRES_CONFIG_PATH is valid.${RESET}"
-        exit 1
+        error "Failed to initialize pgrx. Check PostgreSQL development packages are installed and $POSTGRES_CONFIG_PATH is valid."
+        return 2
     fi
 }

 build() {
     local version
-    version=$(get-version) || return 1
+    version=$(get-version) || return $?
     local package_dir="./package"
     local tarball_name="${GITEA_REPOSITORY}.tar.gz"
     local tarball_path="${package_dir}/${tarball_name}"
-    echo -e "📦 Creating source tarball v$version for ${GITEA_REPOSITORY} in $package_dir..."
+    info "Creating source tarball v$version for ${GITEA_REPOSITORY} in $package_dir..."
     # Clean previous package dir
     rm -rf "${package_dir}"
     mkdir -p "${package_dir}"
     # Create the source tarball excluding specified patterns
-    echo -e " ${CYAN}Creating tarball: ${tarball_path}${RESET}"
+    info "Creating tarball: ${tarball_path}"
     if tar --exclude='.git*' --exclude='./target' --exclude='./package' --exclude='./flows' --exclude='./flow' -czf "${tarball_path}" .; then
-        echo -e "✨ ${GREEN}Successfully created source tarball: ${tarball_path}${RESET}"
+        success "Successfully created source tarball: ${tarball_path}"
     else
-        echo -e "❌ ${RED}Failed to create source tarball.${RESET}" >&2
-        return 1
+        error "Failed to create source tarball."
+        return 2
     fi
 }

 install() {
     local version
-    version=$(get-version) || return 1
+    version=$(get-version) || return $? # Propagate error
-    echo -e "🔧 ${CYAN}Building and installing PGRX extension v$version into local PostgreSQL...${RESET}"
+    info "Building and installing PGRX extension v$version into local PostgreSQL..."
     # Run the pgrx install command
     # It implicitly uses --release unless --debug is passed
     # It finds pg_config or you can add flags like --pg-config if needed
     if ! cargo pgrx install; then
-        echo -e "❌ ${RED}cargo pgrx install command failed.${RESET}" >&2
-        return 1
+        error "cargo pgrx install command failed."
+        return 2
     fi
-    echo -e "✨ ${GREEN}PGRX extension v$version successfully built and installed.${RESET}"
+    success "PGRX extension v$version successfully built and installed."
     # Post-install modification to allow non-superuser usage
     # Get the installation path dynamically using pg_config
     local pg_sharedir
     pg_sharedir=$("$POSTGRES_CONFIG_PATH" --sharedir)
-    if [ -z "$pg_sharedir" ]; then
-        echo -e "❌ ${RED}Failed to determine PostgreSQL shared directory using pg_config.${RESET}" >&2
-        return 1
+    local pg_config_status=$?
+    if [ $pg_config_status -ne 0 ] || [ -z "$pg_sharedir" ]; then
+        error "Failed to determine PostgreSQL shared directory using pg_config."
+        return 2
     fi
     local installed_control_path="${pg_sharedir}/extension/jspg.control"
     # Modify the control file
     if [ ! -f "$installed_control_path" ]; then
-        echo -e "❌ ${RED}Installed control file not found: '$installed_control_path'${RESET}" >&2
-        return 1
+        error "Installed control file not found: '$installed_control_path'"
+        return 2
     fi
-    echo -e "🔧 ${CYAN}Modifying control file for non-superuser access: ${installed_control_path}${RESET}"
+    info "Modifying control file for non-superuser access: ${installed_control_path}"
     # Use sed -i '' for macOS compatibility
     if sed -i '' '/^superuser = false/d' "$installed_control_path" && \
        echo 'trusted = true' >> "$installed_control_path"; then
-        echo -e "✨ ${GREEN}Control file modified successfully.${RESET}"
+        success "Control file modified successfully."
     else
-        echo -e "❌ ${RED}Failed to modify control file: ${installed_control_path}${RESET}" >&2
-        return 1
+        error "Failed to modify control file: ${installed_control_path}"
+        return 2
     fi
 }

 test() {
-    echo -e "🧪 ${CYAN}Running jspg tests...${RESET}"
-    cargo pgrx test "pg${POSTGRES_VERSION}" "$@"
+    info "Running jspg tests..."
+    cargo pgrx test "pg${POSTGRES_VERSION}" "$@" || return $?
 }

 clean() {
-    echo -e "🧹 ${CYAN}Cleaning build artifacts...${RESET}"
-    cargo clean # Use standard cargo clean
+    info "Cleaning build artifacts..."
+    cargo clean || return $?
 }

 jspg-usage() {
-    echo -e " ${CYAN}JSPG Commands:${RESET}"
-    echo -e " prepare          Check OS, Cargo, and PGRX dependencies."
-    echo -e " install [opts]   Run prepare, then build and install the extension locally."
-    echo -e " reinstall [opts] Run prepare, clean, then build and install the extension locally."
-    echo -e " test [opts]      Run pgrx integration tests."
-    echo -e " clean            Remove pgrx build artifacts."
-    echo -e " build            Build release artifacts into ./package/ (called by release)."
-    echo -e " tag              Tag the current version (called by release)."
-    echo -e " package          Upload artifacts from ./package/ (called by release)."
-    echo -e " release          Perform a full release (increments patch, builds, tags, pushes, packages)."
+    printf "prepare\tCheck OS, Cargo, and PGRX dependencies.\n"
+    printf "install\tBuild and install the extension locally (after prepare).\n"
+    printf "reinstall\tClean, build, and install the extension locally (after prepare).\n"
+    printf "test\t\tRun pgrx integration tests.\n"
+    printf "clean\t\tRemove pgrx build artifacts.\n"
 }

 jspg-flow() {
     case "$1" in
-        env) env; return 0;;
-        prepare) base prepare; cargo-prepare; pgrx-prepare; return 0;;
-        build) build; return 0;;
-        install) install; return 0;;
-        reinstall) clean; install; return 0;;
-        test) test; return 0;;
-        package) env; package; return 0;;
-        release) env; release; return 0;;
-        clean) clean; return 0;;
+        env) env; return $?;;
+        prepare) prepare && cargo-prepare && pgrx-prepare; return $?;;
+        build) build; return $?;;
+        install) install; return $?;;
+        reinstall) clean && install; return $?;;
+        test) test "${@:2}"; return $?;;
+        clean) clean; return $?;;
+        release) env; release; return $?;;
         *) return 1 ;;
     esac
 }
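The diff swaps raw echo -e calls (with inline color codes and emoji) for info/success/warning/error helpers whose definitions are not shown here; they presumably come from the sourced flows submodule. A minimal sketch of what such helpers might look like, purely illustrative:

# Hypothetical stand-ins -- the real helpers live in the flows submodule
# and are not part of this diff.
info()    { printf '\033[36m%s\033[0m\n' "$*"; }         # cyan, stdout
success() { printf '\033[32m✨ %s\033[0m\n' "$*"; }      # green, stdout
warning() { printf '\033[33m%s\033[0m\n' "$*"; }         # yellow, stdout
error()   { printf '\033[31m❌ %s\033[0m\n' "$*" >&2; }  # red, stderr

If error already writes to stderr, the explicit >&2 at the env() call site is redundant but harmless. Note also the shift from exit 1 to return 2 throughout: returning instead of exiting means a failed step cannot kill the invoking shell if these functions are ever sourced rather than run in a subshell.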

flows (2 lines changed)

Submodule flows updated: 9d758d581e...3e3954fb79

@@ -48,22 +48,24 @@ fn cache_json_schema(schema_id: &str, schema: JsonB) -> JsonB {
         }
         Err(e) => {
             let error = match &e {
-                CompileError::ValidationError { url: _url, src } => { // Prefix url with _
-                    json!({
-                        "message": format!("Schema '{}' failed validation against its metaschema: {}", schema_id, src),
-                        "schema_path": schema_path,
-                        "error": format!("{:?}", src),
-                    })
+                CompileError::ValidationError { url: _url, src } => {
+                    // Collect leaf errors from the meta-schema validation failure
+                    let mut error_list = Vec::new();
+                    collect_leaf_errors(src, &mut error_list);
+                    // Filter and deduplicate errors, returning as a single JSON Value (Array)
+                    json!(filter_boon_errors(error_list))
                 }
                 _ => {
-                    let _error_type = format!("{:?}", e).split('(').next().unwrap_or("Unknown").to_string(); // Prefix error_type with _
+                    // Keep existing handling for other compilation errors
+                    let _error_type = format!("{:?}", e).split('(').next().unwrap_or("Unknown").to_string();
                     json!({
                         "message": format!("Schema '{}' compilation failed: {}", schema_id, e),
                         "schema_path": schema_path,
-                        "error": format!("{:?}", e),
+                        "detail": format!("{:?}", e),
                     })
                 }
             };
+            // Ensure the outer structure remains { success: false, error: ... }
             JsonB(json!({
                 "success": false,
                 "error": error
@@ -91,11 +93,11 @@ fn validate_json_schema(schema_id: &str, instance: JsonB) -> JsonB {
         Err(validation_error) => {
             // Directly use the result of format_validation_error
             // which now includes the top-level success indicator and flat error list
-            let mut all_errors = Vec::new();
-            collect_leaf_errors(&validation_error, &mut all_errors);
+            let mut error_list = Vec::new();
+            collect_leaf_errors(&validation_error, &mut error_list);
             JsonB(json!({
                 "success": false,
-                "error": all_errors // Flat list of specific errors
+                "error": filter_boon_errors(error_list) // Filter and deduplicate errors
             }))
         }
     }
@@ -125,6 +127,51 @@ fn collect_leaf_errors(error: &ValidationError, errors_list: &mut Vec<Value>) {
     }
 }

+// Filters collected errors, removing structural noise and then deduplicating by instance_path
+fn filter_boon_errors(raw_errors: Vec<Value>) -> Vec<Value> {
+    use std::collections::HashMap;
+    use std::collections::hash_map::Entry;
+
+    // Define schema keywords that indicate structural paths, not instance paths
+    let structural_path_segments = [
+        "/allOf/", "/anyOf/", "/oneOf/",
+        "/if/", "/then/", "/else/",
+        "/not/"
+        // Note: "/properties/" and "/items/" are generally valid,
+        // but might appear spuriously in boon's paths for complex types.
+        // We exclude only the explicitly logical/combinatorial ones for now.
+    ];
+
+    // 1. Filter out errors with instance_paths containing structural segments
+    let plausible_errors: Vec<Value> = raw_errors.into_iter().filter(|error_value| {
+        if let Some(instance_path_value) = error_value.get("instance_path") {
+            if let Some(instance_path_str) = instance_path_value.as_str() {
+                // Keep if NONE of the structural segments are present
+                !structural_path_segments.iter().any(|&segment| instance_path_str.contains(segment))
+            } else {
+                false // Invalid instance_path type, filter out
+            }
+        } else {
+            false // No instance_path field, filter out
+        }
+    }).collect();
+
+    // 2. Deduplicate the remaining plausible errors by instance_path
+    let mut unique_errors: HashMap<String, Value> = HashMap::new();
+    for error_value in plausible_errors {
+        if let Some(instance_path_value) = error_value.get("instance_path") {
+            if let Some(instance_path_str) = instance_path_value.as_str() {
+                if let Entry::Vacant(entry) = unique_errors.entry(instance_path_str.to_string()) {
+                    entry.insert(error_value);
+                }
+            }
+        }
+    }
+
+    // Collect the unique errors
+    unique_errors.into_values().collect()
+}
+
 #[pg_extern(strict, parallel_safe)]
 fn json_schema_cached(schema_id: &str) -> bool {
     let cache = SCHEMA_CACHE.read().unwrap();
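With this change, both cache_json_schema and validate_json_schema report failures as a flat, filtered array under "error" instead of a single nested object. A usage sketch from psql, assuming both functions are exposed via #[pg_extern] like json_schema_cached (database name taken from .env above; the schema id and exact boon messages are illustrative):

psql -d agreego <<'SQL'
SELECT cache_json_schema('https://example.com/thing.json',
    '{"type": "object", "required": ["string_prop"]}'::jsonb);
SELECT validate_json_schema('https://example.com/thing.json', '{}'::jsonb);
-- Expected shape on failure, per the tests below:
-- {"success": false, "error": [{"instance_path": "", "message": "missing properties 'string_prop'", ...}]}
SQL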

@@ -251,26 +251,24 @@ fn test_cache_invalid_json_schema() {
     let cache_result = cache_json_schema(schema_id, jsonb(invalid_schema));
-    // Manually check the structure for cache_json_schema failure
-    let json_result = &cache_result.0;
-    let success = json_result.get("success").and_then(Value::as_bool);
-    let error_obj = json_result.get("error").and_then(Value::as_object);
+    // Expect 2 leaf errors because the meta-schema validation fails at the type value
+    // and within the type array itself.
+    assert_failure_with_json!(
+        cache_result,
+        2, // Expect exactly two leaf errors
+        "value must be one of", // Check message substring (present in both)
+        "Caching invalid schema should fail with specific meta-schema validation errors."
+    );
-    if success != Some(false) {
-        let pretty_json = serde_json::to_string_pretty(&json_result).unwrap_or_else(|_| format!("(Failed to pretty-print JSON: {:?})", json_result));
-        panic!("Assertion Failed (expected failure, success was not false): Caching invalid schema should fail.\nResult JSON:\n{}", pretty_json);
-    }
-    if error_obj.is_none() {
-        let pretty_json = serde_json::to_string_pretty(&json_result).unwrap_or_else(|_| format!("(Failed to pretty-print JSON: {:?})", json_result));
-        panic!("Assertion Failed (expected 'error' object, but none found): Caching invalid schema should return an error object.\nResult JSON:\n{}", pretty_json);
-    }
-    // Check specific fields within the error object
-    let message = error_obj.unwrap().get("message").and_then(Value::as_str);
-    // Updated check based on the actual error message seen in the logs
-    if message.map_or(true, |m| !m.contains("failed validation against its metaschema") || !m.contains("/type/0': value must be one of")) {
-        let pretty_json = serde_json::to_string_pretty(&json_result).unwrap_or_else(|_| format!("(Failed to pretty-print JSON: {:?})", json_result));
-        panic!("Assertion Failed (error message mismatch): Expected metaschema validation failure message containing '/type/0' error detail.\nResult JSON:\n{}", pretty_json);
-    }
+    // Ensure the error is an array and check specifics
+    let error_array = cache_result.0["error"].as_array().expect("Error field should be an array");
+    assert_eq!(error_array.len(), 2);
+    // Note: Order might vary depending on boon's internal processing, check both possibilities or sort.
+    // Assuming the order shown in the logs for now:
+    assert_eq!(error_array[0]["instance_path"], "/type");
+    assert!(error_array[0]["message"].as_str().unwrap().contains("value must be one of"));
+    assert_eq!(error_array[1]["instance_path"], "/type/0");
+    assert!(error_array[1]["message"].as_str().unwrap().contains("value must be one of"));
 }

 #[pg_test]
@@ -335,32 +333,47 @@ fn test_validate_json_schema_oneof_validation_errors() {
     // --- Test case 1: Fails string maxLength (in branch 0) AND missing number_prop (in branch 1) ---
     let invalid_string_instance = json!({ "string_prop": "toolongstring" });
     let result_invalid_string = validate_json_schema(schema_id, jsonb(invalid_string_instance));
-    // Expect 2 leaf errors: one for maxLength (branch 0), one for missing prop (branch 1)
-    // Check the first error message reported by boon (maxLength).
-    assert_failure_with_json!(result_invalid_string, 2, "length must be <=5", "Validation with invalid string length should have 2 leaf errors");
+    // Expect 2 leaf errors. Check count only with the macro.
+    assert_failure_with_json!(result_invalid_string, 2);
+    // Explicitly check that both expected errors are present, ignoring order
+    let errors_string = result_invalid_string.0["error"].as_array().expect("Expected error array for invalid string");
+    assert!(errors_string.iter().any(|e| e["instance_path"] == "/string_prop" && e["message"].as_str().unwrap().contains("length must be <=5")), "Missing maxLength error");
+    assert!(errors_string.iter().any(|e| e["instance_path"] == "" && e["message"].as_str().unwrap().contains("missing properties 'number_prop'")), "Missing number_prop required error");

     // --- Test case 2: Fails number minimum (in branch 1) AND missing string_prop (in branch 0) ---
     let invalid_number_instance = json!({ "number_prop": 5 });
     let result_invalid_number = validate_json_schema(schema_id, jsonb(invalid_number_instance));
-    // Expect 2 leaf errors: one for minimum (branch 1), one for missing prop (branch 0)
-    // Check the first error message reported by boon (missing prop).
-    assert_failure_with_json!(result_invalid_number, 2, "missing properties 'string_prop'", "Validation with invalid number should have 2 leaf errors");
+    // Expect 2 leaf errors. Check count only with the macro.
+    assert_failure_with_json!(result_invalid_number, 2);
+    // Explicitly check that both expected errors are present, ignoring order
+    let errors_number = result_invalid_number.0["error"].as_array().expect("Expected error array for invalid number");
+    assert!(errors_number.iter().any(|e| e["instance_path"] == "/number_prop" && e["message"].as_str().unwrap().contains("must be >=10")), "Missing minimum error");
+    assert!(errors_number.iter().any(|e| e["instance_path"] == "" && e["message"].as_str().unwrap().contains("missing properties 'string_prop'")), "Missing string_prop required error");

     // --- Test case 3: Fails type check (not object) for both branches ---
     // Input: boolean, expected object for both branches
     let invalid_bool_instance = json!(true); // Not an object
     let result_invalid_bool = validate_json_schema(schema_id, jsonb(invalid_bool_instance));
-    // Expect 2 leaf errors, one "Type" error for each branch
-    // Check the first error reported by boon (want object).
-    assert_failure_with_json!(result_invalid_bool, 2, "want object", "Validation with invalid bool should have 2 leaf errors");
+    // Expect only 1 leaf error after filtering, as both original errors have instance_path ""
+    assert_failure_with_json!(result_invalid_bool, 1);
+    // Explicitly check that the single remaining error is the type error for the root instance path
+    let errors_bool = result_invalid_bool.0["error"].as_array().expect("Expected error array for invalid bool");
+    assert_eq!(errors_bool.iter().filter(|e| e["instance_path"] == "" && e["message"].as_str().unwrap().contains("want object")).count(), 1, "Expected one 'want object' error at root after filtering");

     // --- Test case 4: Fails missing required for both branches ---
     // Input: empty object, expected string_prop (branch 0) OR number_prop (branch 1)
     let invalid_empty_obj = json!({});
     let result_empty_obj = validate_json_schema(schema_id, jsonb(invalid_empty_obj));
-    // Expect 2 leaf errors: one required error for branch 0, one required error for branch 1
-    // Check the first error reported by boon (missing string_prop).
-    assert_failure_with_json!(result_empty_obj, 2, "missing properties 'string_prop'", "Validation with empty object should have 2 leaf errors");
+    // Expect only 1 leaf error after filtering, as both original errors have instance_path ""
+    assert_failure_with_json!(result_empty_obj, 1);
+    // Explicitly check that the single remaining error is one of the expected missing properties errors
+    let errors_empty = result_empty_obj.0["error"].as_array().expect("Expected error array for empty object");
+    assert_eq!(errors_empty.len(), 1, "Expected exactly one error after filtering empty object");
+    let the_error = &errors_empty[0];
+    assert_eq!(the_error["instance_path"], "", "Expected instance_path to be empty string");
+    let message = the_error["message"].as_str().unwrap();
+    assert!(message.contains("missing properties 'string_prop'") || message.contains("missing properties 'number_prop'"),
+        "Error message should indicate missing string_prop or number_prop, got: {}", message);
 }

 #[pg_test]
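These #[pg_test] cases run inside pgrx's temporary test Postgres. Grounded in the flow file's test() above, a local run looks like this (the ./flow entry-point name is assumed):

cargo pgrx test pg17                          # what test() runs, with POSTGRES_VERSION=17
./flow test test_cache_invalid_json_schema    # extra args pass through via "${@:2}"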

@@ -1 +1 @@
-1.0.15
+1.0.20
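The removed usage text above described release as incrementing the patch version, then building, tagging, pushing, and packaging, which matches the string of version bumps in the commit list (1.0.15 through 1.0.20). Assuming ./flow is the entry point:

./flow release   # per the old usage text: bump patch, build, tag, push, package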