Skip to content

Commit 47180e7

Browse files
authored
Fix/bind column value parameters also if null (#6)
* fix(sync): always bind column value parameters in merge_insert_col. Fixes a parameter-binding bug in merge_insert_col that caused SQLite-to-PostgreSQL sync to fail with "there is no parameter $3" when NULL values were synced before non-NULL values for the same column.

* fix(network): fix the value of the seq variable in cloudsync_payload_get when the last db_version is not related to a local change.

* test(postgres): new test for NULL values.

* fix(postgres): ensure NULL values use consistent decoded types for SPI plan caching. When syncing NULL values first, PostgreSQL's SPI caches the prepared plan with the NULL's type; if a subsequent non-NULL value decodes to a different type, the plan fails. The fix maps column types to their decoded equivalents so NULL and non-NULL values always use consistent types (e.g., all integers use INT8OID, all floats use FLOAT8OID, most others use TEXTOID). Adds map_column_oid_to_decoded_oid() to perform the mapping (INT2/4/8 → INT8, FLOAT4/8/NUMERIC → FLOAT8, BYTEA → BYTEA, others → TEXT). This ensures NULL and non-NULL values bind with the same type, preventing "there is no parameter $N" errors when NULL is synced before non-NULL values for the same column. Adds tests 23 and 24 for UUID columns and comprehensive nullable type coverage (INT2/4/8, FLOAT4/8, NUMERIC, BYTEA, TEXT, VARCHAR, CHAR, UUID, JSON, JSONB, DATE, TIMESTAMP).

* fix(postgres): add a bigint-to-boolean cast for BOOLEAN column sync. BOOLEAN values are encoded as INT8 in sync payloads for SQLite interoperability, but PostgreSQL has no built-in cast from bigint to boolean. A custom ASSIGNMENT cast enables BOOLEAN columns to sync correctly. The cast uses ASSIGNMENT context (not IMPLICIT) to avoid unintended conversions in WHERE clauses while still enabling the INSERT/UPDATE operations used by merge_insert. The write direction (BOOL → INT encoding flow) "just works" because DatumGetBool() naturally returns 0 or 1; the problem was only on the read side, where PostgreSQL refused to cast the decoded INT8 back to BOOLEAN without the custom cast.

* feat(commands): add sync roundtrip RLS test guide.

* bump version.

* ci: add postgres-test job to main workflow.

* ci(Makefile.postgresql): replace all docker-compose commands with docker compose (v2 plugin syntax). Fixes execution in the GitHub Actions runner, which ships the docker compose v2 plugin but not the standalone docker-compose v1 binary.

* ci: fix the "run postgresql tests" step.
1 parent 8bf4c58 commit 47180e7

16 files changed

+2641
-32
lines changed

.claude/commands/test-sync-roundtrip-rls.md

Lines changed: 532 additions & 0 deletions
Large diffs are not rendered by default.

.github/workflows/main.yml

Lines changed: 32 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -226,10 +226,41 @@ jobs:
226226
path: dist/${{ matrix.name == 'apple-xcframework' && 'CloudSync.*' || 'cloudsync.*'}}
227227
if-no-files-found: error
228228

229+
postgres-test:
230+
runs-on: ubuntu-22.04
231+
name: postgresql build + test
232+
timeout-minutes: 10
233+
234+
steps:
235+
236+
- uses: actions/checkout@v4.2.2
237+
238+
- name: build and start postgresql container
239+
run: make postgres-docker-rebuild
240+
241+
- name: wait for postgresql to be ready
242+
run: |
243+
for i in $(seq 1 30); do
244+
if docker exec cloudsync-postgres pg_isready -U postgres > /dev/null 2>&1; then
245+
echo "PostgreSQL is ready"
246+
exit 0
247+
fi
248+
sleep 2
249+
done
250+
echo "PostgreSQL failed to start within 60s"
251+
docker logs cloudsync-postgres
252+
exit 1
253+
254+
- name: run postgresql tests
255+
run: |
256+
docker exec cloudsync-postgres mkdir -p /tmp/cloudsync/test
257+
docker cp test/postgresql cloudsync-postgres:/tmp/cloudsync/test/postgresql
258+
docker exec cloudsync-postgres psql -U postgres -d postgres -f /tmp/cloudsync/test/postgresql/full_test.sql
259+
229260
release:
230261
runs-on: ubuntu-22.04
231262
name: release
232-
needs: build
263+
needs: [build, postgres-test]
233264
if: github.ref == 'refs/heads/main'
234265

235266
env:

docker/Makefile.postgresql

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -137,32 +137,32 @@ PG_DOCKER_DB_PASSWORD ?= postgres
137137

138138
# Build Docker image with pre-installed extension
139139
postgres-docker-build:
140-
@echo "Building Docker image via docker-compose (rebuilt when sources change)..."
140+
@echo "Building Docker image via docker compose (rebuilt when sources change)..."
141141
# To force plaintext BuildKit logs, run: make postgres-docker-build DOCKER_BUILD_ARGS="--progress=plain"
142-
cd docker/postgresql && docker-compose build $(DOCKER_BUILD_ARGS)
142+
cd docker/postgresql && docker compose build $(DOCKER_BUILD_ARGS)
143143
@echo ""
144144
@echo "Docker image built successfully!"
145145

146146
# Build Docker image with AddressSanitizer enabled (override compose file)
147147
postgres-docker-build-asan:
148-
@echo "Building Docker image with ASAN via docker-compose..."
148+
@echo "Building Docker image with ASAN via docker compose..."
149149
# To force plaintext BuildKit logs, run: make postgres-docker-build-asan DOCKER_BUILD_ARGS=\"--progress=plain\"
150-
cd docker/postgresql && docker-compose -f docker-compose.debug.yml -f docker-compose.asan.yml build $(DOCKER_BUILD_ARGS)
150+
cd docker/postgresql && docker compose -f docker-compose.debug.yml -f docker-compose.asan.yml build $(DOCKER_BUILD_ARGS)
151151
@echo ""
152152
@echo "ASAN Docker image built successfully!"
153153

154154
# Build Docker image using docker-compose.debug.yml
155155
postgres-docker-debug-build:
156-
@echo "Building debug Docker image via docker-compose..."
156+
@echo "Building debug Docker image via docker compose..."
157157
# To force plaintext BuildKit logs, run: make postgres-docker-debug-build DOCKER_BUILD_ARGS=\"--progress=plain\"
158-
cd docker/postgresql && docker-compose -f docker-compose.debug.yml build $(DOCKER_BUILD_ARGS)
158+
cd docker/postgresql && docker compose -f docker-compose.debug.yml build $(DOCKER_BUILD_ARGS)
159159
@echo ""
160160
@echo "Debug Docker image built successfully!"
161161

162162
# Run PostgreSQL container with CloudSync
163163
postgres-docker-run:
164164
@echo "Starting PostgreSQL with CloudSync..."
165-
cd docker/postgresql && docker-compose up -d --build
165+
cd docker/postgresql && docker compose up -d --build
166166
@echo ""
167167
@echo "Container started successfully!"
168168
@echo ""
@@ -179,7 +179,7 @@ postgres-docker-run:
179179
# Run PostgreSQL container with CloudSync and AddressSanitizer enabled
180180
postgres-docker-run-asan:
181181
@echo "Starting PostgreSQL with CloudSync (ASAN enabled)..."
182-
cd docker/postgresql && docker-compose -f docker-compose.debug.yml -f docker-compose.asan.yml up -d --build
182+
cd docker/postgresql && docker compose -f docker-compose.debug.yml -f docker-compose.asan.yml up -d --build
183183
@echo ""
184184
@echo "Container started successfully!"
185185
@echo ""
@@ -196,7 +196,7 @@ postgres-docker-run-asan:
196196
# Run PostgreSQL container using docker-compose.debug.yml
197197
postgres-docker-debug-run:
198198
@echo "Starting PostgreSQL with CloudSync (debug compose)..."
199-
cd docker/postgresql && docker-compose -f docker-compose.debug.yml up -d --build
199+
cd docker/postgresql && docker compose -f docker-compose.debug.yml up -d --build
200200
@echo ""
201201
@echo "Container started successfully!"
202202
@echo ""
@@ -213,21 +213,21 @@ postgres-docker-debug-run:
213213
# Stop PostgreSQL container
214214
postgres-docker-stop:
215215
@echo "Stopping PostgreSQL container..."
216-
cd docker/postgresql && docker-compose down
216+
cd docker/postgresql && docker compose down
217217
@echo "Container stopped"
218218

219219
# Rebuild and restart container
220220
postgres-docker-rebuild: postgres-docker-build
221221
@echo "Rebuilding and restarting container..."
222-
cd docker/postgresql && docker-compose down
223-
cd docker/postgresql && docker-compose up -d --build
222+
cd docker/postgresql && docker compose down
223+
cd docker/postgresql && docker compose up -d --build
224224
@echo "Container restarted with new image"
225225

226226
# Rebuild and restart container using docker-compose.debug.yml
227227
postgres-docker-debug-rebuild: postgres-docker-debug-build
228228
@echo "Rebuilding and restarting debug container..."
229-
cd docker/postgresql && docker-compose -f docker-compose.debug.yml down
230-
cd docker/postgresql && docker-compose -f docker-compose.debug.yml up -d --build
229+
cd docker/postgresql && docker compose -f docker-compose.debug.yml down
230+
cd docker/postgresql && docker compose -f docker-compose.debug.yml up -d --build
231231
@echo "Debug container restarted with new image"
232232

233233
# Interactive shell in container
@@ -353,5 +353,5 @@ postgres-help:
353353
# Simple smoke test: rebuild image/container, create extension, and query version
354354
unittest-pg: postgres-docker-rebuild
355355
@echo "Running PostgreSQL extension smoke test..."
356-
cd docker/postgresql && docker-compose exec -T postgres psql -U postgres -d cloudsync_test -f /tmp/cloudsync/docker/postgresql/smoke_test.sql
356+
cd docker/postgresql && docker compose exec -T postgres psql -U postgres -d cloudsync_test -f /tmp/cloudsync/docker/postgresql/smoke_test.sql
357357
@echo "Smoke test completed."

src/cloudsync.c

Lines changed: 12 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1204,18 +1204,20 @@ int merge_insert_col (cloudsync_context *data, cloudsync_table_context *table, c
12041204
return rc;
12051205
}
12061206

1207-
// bind value
1207+
// bind value (always bind all expected parameters for correct prepared statement handling)
12081208
if (col_value) {
12091209
rc = databasevm_bind_value(vm, table->npks+1, col_value);
12101210
if (rc == DBRES_OK) rc = databasevm_bind_value(vm, table->npks+2, col_value);
1211-
if (rc != DBRES_OK) {
1212-
cloudsync_set_dberror(data);
1213-
dbvm_reset(vm);
1214-
return rc;
1215-
}
1216-
1211+
} else {
1212+
rc = databasevm_bind_null(vm, table->npks+1);
1213+
if (rc == DBRES_OK) rc = databasevm_bind_null(vm, table->npks+2);
12171214
}
1218-
1215+
if (rc != DBRES_OK) {
1216+
cloudsync_set_dberror(data);
1217+
dbvm_reset(vm);
1218+
return rc;
1219+
}
1220+
12191221
// perform real operation and disable triggers
12201222

12211223
// in case of GOS we reused the table->col_merge_stmt statement
@@ -2442,8 +2444,8 @@ int cloudsync_payload_get (cloudsync_context *data, char **blob, int *blob_size,
24422444

24432445
// retrieve BLOB
24442446
char sql[1024];
2445-
snprintf(sql, sizeof(sql), "WITH max_db_version AS (SELECT MAX(db_version) AS max_db_version FROM cloudsync_changes) "
2446-
"SELECT * FROM (SELECT cloudsync_payload_encode(tbl, pk, col_name, col_value, col_version, db_version, site_id, cl, seq) AS payload, max_db_version AS max_db_version, MAX(IIF(db_version = max_db_version, seq, NULL)) FROM cloudsync_changes, max_db_version WHERE site_id=cloudsync_siteid() AND (db_version>%d OR (db_version=%d AND seq>%d))) WHERE payload IS NOT NULL", *db_version, *db_version, *seq);
2447+
snprintf(sql, sizeof(sql), "WITH max_db_version AS (SELECT MAX(db_version) AS max_db_version FROM cloudsync_changes WHERE site_id=cloudsync_siteid()) "
2448+
"SELECT * FROM (SELECT cloudsync_payload_encode(tbl, pk, col_name, col_value, col_version, db_version, site_id, cl, seq) AS payload, max_db_version AS max_db_version, MAX(IIF(db_version = max_db_version, seq, 0)) FROM cloudsync_changes, max_db_version WHERE site_id=cloudsync_siteid() AND (db_version>%d OR (db_version=%d AND seq>%d))) WHERE payload IS NOT NULL", *db_version, *db_version, *seq);
24472449

24482450
int64_t len = 0;
24492451
int rc = database_select_blob_2int(data, sql, blob, &len, new_db_version, new_seq);

src/cloudsync.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
extern "C" {
1818
#endif
1919

20-
#define CLOUDSYNC_VERSION "0.9.101"
20+
#define CLOUDSYNC_VERSION "0.9.102"
2121
#define CLOUDSYNC_MAX_TABLENAME_LEN 512
2222

2323
#define CLOUDSYNC_VALUE_NOTSET -1

src/postgresql/cloudsync--1.0.sql

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -276,3 +276,21 @@ CREATE OR REPLACE FUNCTION cloudsync_table_schema(table_name text)
276276
RETURNS text
277277
AS 'MODULE_PATHNAME', 'pg_cloudsync_table_schema'
278278
LANGUAGE C VOLATILE;
279+
280+
-- ============================================================================
281+
-- Type Casts
282+
-- ============================================================================
283+
284+
-- Cast function: converts bigint to boolean (0 = false, non-zero = true)
285+
-- Required because BOOLEAN values are encoded as INT8 in sync payloads,
286+
-- but PostgreSQL has no built-in cast from bigint to boolean.
287+
CREATE FUNCTION cloudsync_int8_to_bool(bigint) RETURNS boolean AS $$
288+
SELECT $1 <> 0
289+
$$ LANGUAGE SQL IMMUTABLE STRICT;
290+
291+
-- ASSIGNMENT cast: auto-applies in INSERT/UPDATE context only
292+
-- This enables BOOLEAN column sync where values are encoded as INT8.
293+
-- Using ASSIGNMENT (not IMPLICIT) to avoid unintended conversions in WHERE clauses.
294+
CREATE CAST (bigint AS boolean)
295+
WITH FUNCTION cloudsync_int8_to_bool(bigint)
296+
AS ASSIGNMENT;

src/postgresql/cloudsync_postgresql.c

Lines changed: 87 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1638,6 +1638,77 @@ static int cloudsync_decode_value_cb (void *xdata, int index, int type, int64_t
16381638
return DBRES_OK;
16391639
}
16401640

1641+
// Map a column Oid to the decoded type Oid that would be used for non-NULL values.
1642+
// This ensures NULL and non-NULL values use consistent types for SPI plan caching.
1643+
// The mapping must match pgvalue_dbtype() in pgvalue.c which determines encode/decode types.
1644+
// For example, INT4OID columns decode to INT8OID, UUIDOID columns decode to TEXTOID.
1645+
static Oid map_column_oid_to_decoded_oid(Oid col_oid) {
1646+
switch (col_oid) {
1647+
// Integer types → INT8OID (all integers decode to int64)
1648+
// Must match DBTYPE_INTEGER cases in pgvalue_dbtype()
1649+
case INT2OID:
1650+
case INT4OID:
1651+
case INT8OID:
1652+
case BOOLOID: // BOOLEAN encodes/decodes as INTEGER
1653+
case CHAROID: // "char" encodes/decodes as INTEGER
1654+
case OIDOID: // OID encodes/decodes as INTEGER
1655+
return INT8OID;
1656+
// Float types → FLOAT8OID (all floats decode to double)
1657+
// Must match DBTYPE_FLOAT cases in pgvalue_dbtype()
1658+
case FLOAT4OID:
1659+
case FLOAT8OID:
1660+
case NUMERICOID:
1661+
return FLOAT8OID;
1662+
// Binary types → BYTEAOID
1663+
// Must match DBTYPE_BLOB cases in pgvalue_dbtype()
1664+
case BYTEAOID:
1665+
return BYTEAOID;
1666+
// All other types (text, varchar, uuid, json, date, timestamp, etc.) → TEXTOID
1667+
// These all encode/decode as DBTYPE_TEXT
1668+
default:
1669+
return TEXTOID;
1670+
}
1671+
}
1672+
1673+
// Get the Oid of a column from the system catalog.
1674+
// Requires SPI to be connected. Returns InvalidOid if not found.
1675+
static Oid get_column_oid(const char *schema, const char *table_name, const char *column_name) {
1676+
if (!table_name || !column_name) return InvalidOid;
1677+
1678+
const char *query =
1679+
"SELECT a.atttypid "
1680+
"FROM pg_attribute a "
1681+
"JOIN pg_class c ON c.oid = a.attrelid "
1682+
"LEFT JOIN pg_namespace n ON n.oid = c.relnamespace "
1683+
"WHERE c.relname = $1 "
1684+
"AND a.attname = $2 "
1685+
"AND a.attnum > 0 "
1686+
"AND NOT a.attisdropped "
1687+
"AND (n.nspname = $3 OR $3 IS NULL)";
1688+
1689+
Oid argtypes[3] = {TEXTOID, TEXTOID, TEXTOID};
1690+
Datum values[3];
1691+
char nulls[3] = {' ', ' ', schema ? ' ' : 'n'};
1692+
1693+
values[0] = CStringGetTextDatum(table_name);
1694+
values[1] = CStringGetTextDatum(column_name);
1695+
values[2] = schema ? CStringGetTextDatum(schema) : (Datum)0;
1696+
1697+
int ret = SPI_execute_with_args(query, 3, argtypes, values, nulls, true, 1);
1698+
1699+
pfree(DatumGetPointer(values[0]));
1700+
pfree(DatumGetPointer(values[1]));
1701+
if (schema) pfree(DatumGetPointer(values[2]));
1702+
1703+
if (ret != SPI_OK_SELECT || SPI_processed == 0) return InvalidOid;
1704+
1705+
bool isnull;
1706+
Datum col_oid = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull);
1707+
if (isnull) return InvalidOid;
1708+
1709+
return DatumGetObjectId(col_oid);
1710+
}
1711+
16411712
// Decode encoded bytea into a pgvalue_t with the decoded base type.
16421713
// Type casting to the target column type is handled by the SQL statement.
16431714
static pgvalue_t *cloudsync_decode_bytea_to_pgvalue (bytea *encoded, bool *out_isnull) {
@@ -2247,9 +2318,23 @@ Datum cloudsync_changes_insert_trigger (PG_FUNCTION_ARGS) {
22472318
if (SPI_connect() != SPI_OK_CONNECT) ereport(ERROR, (errmsg("cloudsync: SPI_connect failed in trigger")));
22482319
spi_connected = true;
22492320

2250-
// Decode value to base type; SQL statement handles type casting via $n::typename
2321+
// Decode value to base type; SQL statement handles type casting via $n::typename.
2322+
// For non-NULL values, we get the decoded base type (INT8OID for integers, TEXTOID for text/UUID, etc).
2323+
// For NULL values, we must use the SAME decoded type that non-NULL values would use.
2324+
// This ensures type consistency across all calls, as SPI caches parameter types on first prepare.
22512325
if (!is_tombstone) {
2252-
col_value = cloudsync_decode_bytea_to_pgvalue(insert_value_encoded, NULL);
2326+
bool value_is_null = false;
2327+
col_value = cloudsync_decode_bytea_to_pgvalue(insert_value_encoded, &value_is_null);
2328+
2329+
// When value is NULL, create a typed NULL pgvalue with the decoded type.
2330+
// We map the column's actual Oid to the corresponding decoded Oid (e.g., INT4OID → INT8OID).
2331+
if (!col_value && value_is_null) {
2332+
Oid col_oid = get_column_oid(table_schema(table), insert_tbl, insert_name);
2333+
if (OidIsValid(col_oid)) {
2334+
Oid decoded_oid = map_column_oid_to_decoded_oid(col_oid);
2335+
col_value = pgvalue_create((Datum)0, decoded_oid, -1, InvalidOid, true);
2336+
}
2337+
}
22532338
}
22542339

22552340
int rc = DBRES_OK;

src/postgresql/database_postgresql.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2177,7 +2177,7 @@ int databasevm_bind_null (dbvm_t *vm, int index) {
21772177

21782178
pg_stmt_t *stmt = (pg_stmt_t*)vm;
21792179
stmt->values[idx] = (Datum)0;
2180-
stmt->types[idx] = BYTEAOID;
2180+
stmt->types[idx] = TEXTOID; // TEXTOID has casts to most types
21812181
stmt->nulls[idx] = 'n';
21822182

21832183
if (stmt->nparams < idx + 1) stmt->nparams = idx + 1;
@@ -2222,7 +2222,8 @@ int databasevm_bind_value (dbvm_t *vm, int index, dbvalue_t *value) {
22222222
pgvalue_t *v = (pgvalue_t *)value;
22232223
if (!v || v->isnull) {
22242224
stmt->values[idx] = (Datum)0;
2225-
stmt->types[idx] = TEXTOID;
2225+
// Use the actual column type if available, otherwise default to TEXTOID
2226+
stmt->types[idx] = (v && OidIsValid(v->typeid)) ? v->typeid : TEXTOID;
22262227
stmt->nulls[idx] = 'n';
22272228
} else {
22282229
int16 typlen;

test/postgresql/01_unittest.sql

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ SELECT cloudsync_version() AS version \gset
2121

2222
-- Test uuid generation
2323
SELECT cloudsync_uuid() AS uuid1 \gset
24-
SELECT pg_sleep(0.1);
24+
SELECT pg_sleep(0.1) \gset
2525
SELECT cloudsync_uuid() AS uuid2 \gset
2626

2727
-- Test 1: Format check (UUID v7 has standard format: xxxxxxxx-xxxx-7xxx-xxxx-xxxxxxxxxxxx)

0 commit comments

Comments
 (0)