From d07d0f22ff1f884662aa0abdcc013a4626f57ec0 Mon Sep 17 00:00:00 2001
From: adela
Date: Fri, 10 Apr 2026 16:54:05 +0200
Subject: [PATCH 1/2] add 1205 1366 22p02 error codes
---
.../error/1205-lock-wait-timeout-exceeded.md | 228 +++++++++++++++
.../error/1366-incorrect-string-value.md | 201 ++++++++++++++
.../22p02-invalid-input-syntax-postgres.md | 262 ++++++++++++++++++
3 files changed, 691 insertions(+)
create mode 100644 content/reference/mysql/error/1205-lock-wait-timeout-exceeded.md
create mode 100644 content/reference/mysql/error/1366-incorrect-string-value.md
create mode 100644 content/reference/postgres/error/22p02-invalid-input-syntax-postgres.md
diff --git a/content/reference/mysql/error/1205-lock-wait-timeout-exceeded.md b/content/reference/mysql/error/1205-lock-wait-timeout-exceeded.md
new file mode 100644
index 00000000..c9eb6a87
--- /dev/null
+++ b/content/reference/mysql/error/1205-lock-wait-timeout-exceeded.md
@@ -0,0 +1,228 @@
+---
+title: 'ERROR 1205 (HY000): Lock Wait Timeout Exceeded in MySQL'
+---
+
+## Error Message
+
+```sql
+ERROR 1205 (HY000): Lock wait timeout exceeded; try restarting transaction
+```
+
+## What Triggers This Error
+
+MySQL 1205 fires when a transaction waits longer than `innodb_lock_wait_timeout` seconds (default: 50) to acquire a row lock held by another transaction. Unlike a deadlock (ERROR 1213), MySQL does not automatically detect this — it simply gives up waiting. The fix depends on why the lock is held so long:
+
+- **Long-running transaction holding locks** — an uncommitted transaction keeps row locks open
+- **Bulk UPDATE or DELETE blocking other transactions** — a large write locks thousands of rows
+- **Foreign key checks causing implicit locks on the parent table** — InnoDB reads the parent row with a shared lock
+- **`innodb_lock_wait_timeout` too low for batch operations** — the default 50s isn't enough for heavy workloads
+- **Circular wait that wasn't detected as a deadlock** — rare edge case where the wait graph check missed the cycle
+
+## Fix by Scenario
+
+### Long-running transaction holding locks
+
+The most common cause. A transaction ran a `SELECT ... FOR UPDATE` or an `UPDATE`, then never committed — maybe the application crashed, a developer left a session open, or a retry loop is stuck.
+
+```sql
+-- Find the blocking transaction (MySQL 5.7 and earlier — these tables were removed in 8.0)
+SELECT
+ r.trx_id AS waiting_trx_id,
+ r.trx_mysql_thread_id AS waiting_thread,
+ r.trx_query AS waiting_query,
+ b.trx_id AS blocking_trx_id,
+ b.trx_mysql_thread_id AS blocking_thread,
+ b.trx_query AS blocking_query,
+ b.trx_started AS blocking_since
+FROM information_schema.INNODB_LOCK_WAITS w
+JOIN information_schema.INNODB_TRX b ON b.trx_id = w.blocking_trx_id
+JOIN information_schema.INNODB_TRX r ON r.trx_id = w.requesting_trx_id;
+```
+
+For MySQL 8.0+, use the `performance_schema` instead:
+
+```sql
+SELECT
+ waiting.THREAD_ID AS waiting_thread,
+ waiting.SQL_TEXT AS waiting_query,
+ blocking.THREAD_ID AS blocking_thread,
+ blocking.SQL_TEXT AS blocking_query
+FROM performance_schema.data_lock_waits w
+JOIN performance_schema.events_statements_current waiting
+ ON waiting.THREAD_ID = w.REQUESTING_THREAD_ID
+JOIN performance_schema.events_statements_current blocking
+ ON blocking.THREAD_ID = w.BLOCKING_THREAD_ID;
+```
+
+**Fix:**
+
+1. Kill the blocking session if it's idle or stuck:
+
+```sql
+-- Check if the blocking thread is doing anything
+SHOW PROCESSLIST;
+
+-- Kill the idle blocker (use the blocking_thread from above)
+KILL 12345;
+```
+
+2. Fix the application to commit or rollback promptly:
+
+```python
+# Bad: connection stays open with uncommitted transaction
+cursor.execute("UPDATE orders SET status = 'processing' WHERE id = %s", (order_id,))
+result = call_payment_api(order_id) # 60 seconds — locks held the entire time
+cursor.execute("UPDATE orders SET status = %s WHERE id = %s", (result, order_id))
+connection.commit()
+
+# Good: separate transactions
+cursor.execute("UPDATE orders SET status = 'processing' WHERE id = %s", (order_id,))
+connection.commit() # release locks immediately
+
+result = call_payment_api(order_id) # locks are free
+
+cursor.execute("UPDATE orders SET status = %s WHERE id = %s", (result, order_id))
+connection.commit()
+```
+
+### Bulk UPDATE or DELETE blocking other transactions
+
+A single `UPDATE` or `DELETE` affecting thousands of rows locks them all for the duration of the statement. Other transactions waiting for any of those rows will time out.
+
+```sql
+-- This locks every row in the table for the entire execution
+UPDATE orders SET status = 'archived' WHERE created_at < '2025-01-01';
+-- Could take minutes — every other transaction touching `orders` waits
+```
+
+**Fix:** Break the operation into smaller batches:
+
+```sql
+-- Process 1000 rows at a time
+SET @batch_size = 1000;
+SET @rows_affected = 1;
+
+WHILE @rows_affected > 0 DO
+ UPDATE orders SET status = 'archived'
+ WHERE created_at < '2025-01-01' AND status != 'archived'
+ LIMIT 1000;
+ SET @rows_affected = ROW_COUNT();
+ -- Brief pause to let other transactions through
+END WHILE;
+```
+
+Or in application code:
+
+```python
+batch_size = 1000
+while True:
+ cursor.execute("""
+ UPDATE orders SET status = 'archived'
+ WHERE created_at < '2025-01-01' AND status != 'archived'
+ LIMIT %s
+ """, (batch_size,))
+ connection.commit()
+ if cursor.rowcount == 0:
+ break
+ time.sleep(0.1) # let other transactions acquire locks
+```
+
+### Foreign key checks causing implicit locks on parent table
+
+When you INSERT into a child table with a foreign key, InnoDB places a shared lock on the parent row to verify it exists. If another transaction holds an exclusive lock on that parent row, the child INSERT waits.
+
+```sql
+-- Transaction A: updates a customer (holds exclusive lock on id=42)
+START TRANSACTION;
+UPDATE customers SET name = 'New Name' WHERE id = 42;
+-- Does NOT commit yet
+
+-- Transaction B: inserts an order for that customer (needs shared lock on customers.id=42)
+INSERT INTO orders (customer_id, total) VALUES (42, 99.99);
+-- Waits... and eventually ERROR 1205
+```
+
+**Fix:**
+
+1. Keep the parent update transaction short — commit before the child insert needs the row
+2. If the parent update is part of a batch, process it in smaller chunks
+3. If FK validation isn't needed during bulk inserts, temporarily disable it:
+
+```sql
+-- Only for controlled batch operations — not for regular application use
+SET FOREIGN_KEY_CHECKS = 0;
+-- ... bulk inserts ...
+SET FOREIGN_KEY_CHECKS = 1;
+```
+
+### `innodb_lock_wait_timeout` set too low
+
+The default is 50 seconds, which is usually enough. But batch jobs, reporting queries, or migration scripts may legitimately need longer.
+
+```sql
+-- Check the current timeout
+SELECT @@innodb_lock_wait_timeout;
+
+-- Increase for the current session only (for a batch job)
+SET SESSION innodb_lock_wait_timeout = 300; -- 5 minutes
+
+-- Or increase globally (requires careful consideration)
+SET GLOBAL innodb_lock_wait_timeout = 120;
+```
+
+**Fix:** Set it per-session for batch operations rather than changing the global default. A high global timeout means genuine lock problems take longer to surface.
+
+### Circular wait not detected as deadlock
+
+Rarely, InnoDB's deadlock detector misses a cycle — especially with complex multi-table lock chains or when `innodb_deadlock_detect` is disabled (some high-concurrency setups turn it off for performance).
+
+```sql
+-- Check if deadlock detection is enabled
+SELECT @@innodb_deadlock_detect;
+
+-- Check the latest detected deadlock
+SHOW ENGINE INNODB STATUS;
+-- Look for the "LATEST DETECTED DEADLOCK" section
+```
+
+**Fix:**
+
+1. Re-enable deadlock detection if it was turned off:
+
+```sql
+SET GLOBAL innodb_deadlock_detect = ON;
+```
+
+2. Add application-level retry logic for 1205 errors (same pattern as deadlock retries):
+
+```python
+max_retries = 3
+for attempt in range(max_retries):
+ try:
+ cursor.execute("START TRANSACTION")
+ cursor.execute("UPDATE accounts SET balance = balance - 100 WHERE id = 1")
+ cursor.execute("UPDATE accounts SET balance = balance + 100 WHERE id = 2")
+ connection.commit()
+ break
+ except mysql.connector.Error as err:
+ connection.rollback()
+ if err.errno in (1205, 1213) and attempt < max_retries - 1:
+ time.sleep(2 ** attempt)
+ else:
+ raise
+```
+
+## Prevention
+
+- Commit transactions as quickly as possible — never hold locks while waiting for external API calls or user input
+- Break large UPDATE/DELETE operations into batches of 1000-5000 rows
+- Add indexes on columns used in WHERE clauses to avoid full table scans that escalate lock scope
+- Use `SET SESSION innodb_lock_wait_timeout` for batch jobs instead of raising the global default
+- Monitor `INNODB_TRX` for transactions running longer than expected and alert on them
+- Always implement retry logic for 1205 and 1213 errors in application code
+
+
+
+Bytebase's [SQL Review](https://docs.bytebase.com/sql-review/review-rules/) can flag large UPDATE/DELETE statements without LIMIT during change review, preventing bulk operations from causing lock contention. See also [ERROR 1213: Deadlock Found](/reference/mysql/error/1213-deadlock-found) for deadlock-specific troubleshooting.
+
+
diff --git a/content/reference/mysql/error/1366-incorrect-string-value.md b/content/reference/mysql/error/1366-incorrect-string-value.md
new file mode 100644
index 00000000..a4b16833
--- /dev/null
+++ b/content/reference/mysql/error/1366-incorrect-string-value.md
@@ -0,0 +1,201 @@
+---
+title: "ERROR 1366 (HY000): Incorrect String Value in MySQL"
+---
+
+## Error Message
+
+```sql
+ERROR 1366 (HY000): Incorrect string value: '\xF0\x9F\x98\x80' for column 'name' at row 1
+```
+
+Other common variations:
+
+```sql
+ERROR 1366 (HY000): Incorrect string value: '\xE4\xB8\xAD' for column 'description' at row 1
+ERROR 1366 (HY000): Incorrect string value: '\x80\x81\x82' for column 'content' at row 1
+```
+
+## What Triggers This Error
+
+MySQL 1366 fires when a string value contains bytes that cannot be represented in the column's character set. The most common case: inserting emoji or 4-byte Unicode characters into a `utf8` (3-byte) column instead of `utf8mb4` (4-byte). The fix depends on where the charset mismatch occurs:
+
+- **Emoji or 4-byte characters into a `utf8` column** — the column needs `utf8mb4`
+- **Connection charset doesn't match column charset** — the client sends bytes the server can't interpret
+- **Data migration with unconverted binary data** — migrating from `latin1` to `utf8` without converting the actual bytes
+- **Application sends raw bytes without specifying charset** — the driver defaults to a charset that doesn't match the data
+- **`LOAD DATA INFILE` with wrong `CHARACTER SET` clause** — the file encoding doesn't match what MySQL expects
+
+## Fix by Scenario
+
+### Emoji or 4-byte characters into a `utf8` column
+
+MySQL's `utf8` is actually `utf8mb3` — it only supports characters up to 3 bytes. Emoji, some CJK characters, and mathematical symbols are 4 bytes and require `utf8mb4`.
+
+```sql
+-- Check the column's character set
+SELECT COLUMN_NAME, CHARACTER_SET_NAME, COLLATION_NAME
+FROM information_schema.COLUMNS
+WHERE TABLE_SCHEMA = 'mydb' AND TABLE_NAME = 'users' AND COLUMN_NAME = 'name';
+
+-- If it shows 'utf8' (not 'utf8mb4'), that's the problem
+```
+
+**Fix:** Convert the column (or the entire table) to `utf8mb4`:
+
+```sql
+-- Convert a single column
+ALTER TABLE users MODIFY name VARCHAR(255)
+ CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
+
+-- Convert the entire table
+ALTER TABLE users CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
+
+-- Convert the database default (for new tables)
+ALTER DATABASE mydb CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
+```
+
+**Important:** If you have indexes on the column and the column is `VARCHAR(255)`, switching to `utf8mb4` increases the index key size from 765 bytes (255 × 3) to 1020 bytes (255 × 4). If this exceeds the InnoDB index key limit (3072 bytes for `DYNAMIC`/`COMPRESSED` row format, 767 bytes for `COMPACT`/`REDUNDANT`), reduce the column length or use a prefix index:
+
+```sql
+-- Check your row format
+SELECT TABLE_NAME, ROW_FORMAT FROM information_schema.TABLES
+WHERE TABLE_SCHEMA = 'mydb' AND TABLE_NAME = 'users';
+
+-- If COMPACT/REDUNDANT, you may need a prefix index
+ALTER TABLE users MODIFY name VARCHAR(191)
+ CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
+-- 191 × 4 = 764 bytes, just under the 767-byte limit
+```
+
+### Connection charset doesn't match column charset
+
+Even if the column is `utf8mb4`, the connection must also use `utf8mb4`. Otherwise MySQL tries to convert the bytes from the connection charset to the column charset and fails.
+
+```sql
+-- Check the current connection charset
+SHOW VARIABLES LIKE 'character_set%';
+
+-- Look for:
+-- character_set_client = utf8mb4
+-- character_set_connection = utf8mb4
+-- character_set_results = utf8mb4
+```
+
+**Fix:** Set the connection charset when connecting:
+
+```python
+# Python with mysql-connector
+connection = mysql.connector.connect(
+ host='localhost',
+ user='root',
+ database='mydb',
+ charset='utf8mb4',
+ collation='utf8mb4_unicode_ci'
+)
+
+# Python with SQLAlchemy
+engine = create_engine(
+ 'mysql+pymysql://root@localhost/mydb?charset=utf8mb4'
+)
+```
+
+```java
+// Java JDBC
+String url = "jdbc:mysql://localhost/mydb?characterEncoding=UTF-8&connectionCollation=utf8mb4_unicode_ci";
+```
+
+Or set it per-session:
+
+```sql
+SET NAMES 'utf8mb4' COLLATE 'utf8mb4_unicode_ci';
+```
+
+### Data migration from `latin1` to `utf8` with unconverted binary data
+
+A common trap: a `latin1` column that actually stores UTF-8 bytes (because the application wrote UTF-8 through a `latin1` connection). When you `ALTER TABLE ... CONVERT TO CHARACTER SET utf8mb4`, MySQL re-encodes the bytes — double-encoding them and producing garbage or errors.
+
+```sql
+-- Check if the data is actually UTF-8 stored in latin1
+SELECT name, HEX(name) FROM users WHERE id = 1;
+-- If you see valid UTF-8 byte sequences (E4 B8 AD for 中), it's double-encoded
+```
+
+**Fix:** Convert via `BINARY` to preserve the raw bytes:
+
+```sql
+-- Step 1: convert to BINARY (strips charset metadata, preserves bytes)
+ALTER TABLE users MODIFY name VARBINARY(255);
+
+-- Step 2: convert from BINARY to utf8mb4 (interprets bytes as UTF-8)
+ALTER TABLE users MODIFY name VARCHAR(255)
+ CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
+```
+
+**Always back up the table before this operation.** If the bytes are not actually valid UTF-8, step 2 will fail or produce incorrect data.
+
+### Application sends raw bytes without specifying charset
+
+Some applications or scripts write raw bytes to MySQL without setting the connection charset. MySQL interprets them using the server default (`character_set_server`), which may not match.
+
+```sql
+-- Check the server default
+SHOW VARIABLES LIKE 'character_set_server';
+-- If this is 'latin1' but your app sends UTF-8, you'll get 1366
+```
+
+**Fix:**
+
+1. Set `character_set_server` to `utf8mb4` in `my.cnf`:
+
+```ini
+[mysqld]
+character-set-server = utf8mb4
+collation-server = utf8mb4_unicode_ci
+
+[client]
+default-character-set = utf8mb4
+```
+
+2. Restart MySQL for the changes to take effect
+3. For existing connections, set charset explicitly (see the connection charset fix above)
+
+### `LOAD DATA INFILE` with wrong `CHARACTER SET` clause
+
+When importing a file, MySQL needs to know the file's encoding. If the file is UTF-8 but you don't specify that, MySQL uses the connection charset or `character_set_database`.
+
+```sql
+-- This may fail if the file contains UTF-8 but the connection charset is latin1
+LOAD DATA INFILE '/tmp/data.csv' INTO TABLE users
+FIELDS TERMINATED BY ',' ENCLOSED BY '"'
+LINES TERMINATED BY '\n';
+-- ERROR 1366: Incorrect string value
+```
+
+**Fix:** Specify the file's character set explicitly:
+
+```sql
+LOAD DATA INFILE '/tmp/data.csv' INTO TABLE users
+CHARACTER SET utf8mb4
+FIELDS TERMINATED BY ',' ENCLOSED BY '"'
+LINES TERMINATED BY '\n';
+```
+
+For `mysqlimport`:
+
+```bash
+mysqlimport --default-character-set=utf8mb4 mydb /tmp/users.txt
+```
+
+## Prevention
+
+- Use `utf8mb4` everywhere — server, database, table, column, and connection. MySQL's `utf8` is a legacy alias for the 3-byte subset and should be considered deprecated
+- Set `charset=utf8mb4` in all application connection strings
+- Add `character-set-server=utf8mb4` and `collation-server=utf8mb4_unicode_ci` to `my.cnf`
+- When migrating data between charsets, always check whether the stored bytes already are the target encoding (the `latin1` stores UTF-8 trap)
+- Specify `CHARACTER SET utf8mb4` in `LOAD DATA INFILE` and `mysqlimport` commands
+
+
+
+Bytebase's [SQL Review](https://docs.bytebase.com/sql-review/review-rules/) can enforce `utf8mb4` as the required character set for all new tables and columns, preventing charset mismatches before they reach production. See also [ERROR 1071: Specified Key Was Too Long](/reference/mysql/error/1071-specified-key-was-too-long) if converting to `utf8mb4` causes index size issues.
+
+
diff --git a/content/reference/postgres/error/22p02-invalid-input-syntax-postgres.md b/content/reference/postgres/error/22p02-invalid-input-syntax-postgres.md
new file mode 100644
index 00000000..a507c5d0
--- /dev/null
+++ b/content/reference/postgres/error/22p02-invalid-input-syntax-postgres.md
@@ -0,0 +1,262 @@
+---
+title: 'ERROR 22P02: Invalid Input Syntax for Type in Postgres'
+---
+
+## Error Message
+
+```sql
+ERROR: invalid input syntax for type integer: "abc"
+SQLSTATE: 22P02
+```
+
+Other common variations:
+
+```sql
+ERROR: invalid input syntax for type uuid: "not-a-uuid"
+ERROR: invalid input syntax for type boolean: "yes"
+ERROR: invalid input syntax for type timestamp: "04-13-2026"
+ERROR: invalid input syntax for type json: "{ invalid }"
+```
+
+## What Triggers This Error
+
+PostgreSQL 22P02 fires when a value cannot be parsed into the expected data type. Unlike MySQL, Postgres does not silently coerce types — it rejects bad input immediately. The fix depends on which type conversion failed:
+
+- **String where integer expected** — a query passes text like `'abc'` to an integer column or parameter
+- **Malformed UUID** — a string that doesn't match the UUID format `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`
+- **Invalid boolean value** — using `'yes'`/`'no'` instead of PostgreSQL's accepted boolean literals
+- **Timestamp format mismatch** — date string doesn't match the expected format or `datestyle` setting
+- **Invalid JSON literal** — malformed JSON in a `json` or `jsonb` column
+
+## Fix by Scenario
+
+### String passed where integer expected
+
+The most common cause. Usually happens when application code passes unsanitized user input directly into a query, or when a query parameter binding fails.
+
+```sql
+-- This fails
+SELECT * FROM users WHERE id = 'abc';
+-- ERROR: invalid input syntax for type integer: "abc"
+
+-- Also fails with empty strings
+SELECT * FROM users WHERE id = '';
+-- ERROR: invalid input syntax for type integer: ""
+```
+
+**Fix:**
+
+1. Validate input in the application layer before querying:
+
+```python
+# Bad: passes raw input to query
+user_id = request.args.get('id') # could be 'abc' or ''
+cursor.execute("SELECT * FROM users WHERE id = %s", (user_id,))
+
+# Good: validate first
+user_id = request.args.get('id')
+try:
+ user_id = int(user_id)
+except (ValueError, TypeError):
+ return {"error": "Invalid user ID"}, 400
+cursor.execute("SELECT * FROM users WHERE id = %s", (user_id,))
+```
+
+2. If null or empty values are possible, handle them explicitly:
+
+```sql
+-- Use NULLIF to convert empty strings to NULL
+SELECT * FROM users WHERE id = NULLIF(:input, '')::integer;
+```
+
+3. If the input may be non-numeric, guard the cast with `CASE` (a plain `AND` does not guarantee the regex check runs before the cast):
+
+```sql
+SELECT * FROM users
+WHERE id = CASE WHEN :input ~ '^\d+$'
+                THEN :input::integer END;
+```
+
+### Malformed UUID
+
+UUID columns are strict — the value must contain 32 hex digits, canonically in the `8-4-4-4-12` format (PostgreSQL also accepts variants such as omitted hyphens or surrounding braces, but not arbitrary strings).
+
+```sql
+-- These all fail
+SELECT * FROM sessions WHERE session_id = 'not-a-uuid';
+SELECT * FROM sessions WHERE session_id = '12345';
+SELECT * FROM sessions WHERE session_id = '';
+```
+
+**Fix:**
+
+1. Validate UUID format before querying:
+
+```python
+import uuid
+
+def is_valid_uuid(val):
+ try:
+ uuid.UUID(str(val))
+ return True
+ except ValueError:
+ return False
+
+session_id = request.args.get('session_id')
+if not is_valid_uuid(session_id):
+ return {"error": "Invalid session ID"}, 400
+```
+
+2. In SQL, validate before casting to avoid the error:
+
+```sql
+-- pg_input_is_valid returns false instead of raising an error (PostgreSQL 16+)
+SELECT * FROM sessions
+WHERE session_id = CASE WHEN pg_input_is_valid(:input, 'uuid') THEN :input::uuid END;
+```
+
+For PostgreSQL versions before 16, create a safe cast function:
+
+```sql
+CREATE OR REPLACE FUNCTION try_cast_uuid(text)
+RETURNS uuid AS $$
+BEGIN
+ RETURN $1::uuid;
+EXCEPTION WHEN invalid_text_representation THEN
+ RETURN NULL;
+END;
+$$ LANGUAGE plpgsql IMMUTABLE;
+
+SELECT * FROM sessions WHERE session_id = try_cast_uuid(:input);
+```
+
+### Invalid boolean value
+
+PostgreSQL accepts `true`/`false`, `t`/`f`, `yes`/`no`, `on`/`off`, `1`/`0` — but not arbitrary strings.
+
+```sql
+-- These work
+SELECT * FROM users WHERE active = true;
+SELECT * FROM users WHERE active = 'yes';
+SELECT * FROM users WHERE active = '1';
+
+-- These fail
+SELECT * FROM users WHERE active = 'Y';
+SELECT * FROM users WHERE active = 'active';
+SELECT * FROM users WHERE active = 'enabled';
+```
+
+**Fix:**
+
+1. Map application values to PostgreSQL booleans:
+
+```python
+bool_map = {'Y': True, 'N': False, 'active': True, 'inactive': False}
+active = bool_map.get(request.args.get('active'))
+if active is None:
+ return {"error": "Invalid boolean value"}, 400
+cursor.execute("SELECT * FROM users WHERE active = %s", (active,))
+```
+
+2. In SQL, use a CASE expression for non-standard values:
+
+```sql
+SELECT * FROM users
+WHERE active = CASE :input
+ WHEN 'Y' THEN true
+ WHEN 'N' THEN false
+ WHEN 'active' THEN true
+ WHEN 'inactive' THEN false
+END;
+```
+
+### Timestamp format mismatch
+
+PostgreSQL parses timestamps based on the `datestyle` setting. The default `ISO, MDY` interprets `01-02-2026` as January 2nd, but `13-01-2026` fails because there's no month 13.
+
+```sql
+-- Check your current datestyle
+SHOW datestyle; -- e.g., 'ISO, MDY'
+
+-- This fails with MDY datestyle (no month 13)
+SELECT '13-01-2026'::date;
+-- ERROR: invalid input syntax for type date: "13-01-2026"
+
+-- This works — ISO 8601 format is always unambiguous
+SELECT '2026-01-13'::date;
+```
+
+**Fix:**
+
+1. Always use ISO 8601 format (`YYYY-MM-DD`) — it works regardless of `datestyle`:
+
+```sql
+-- Always safe
+SELECT * FROM events WHERE created_at > '2026-04-13';
+```
+
+2. Use `TO_DATE` or `TO_TIMESTAMP` with an explicit format:
+
+```sql
+SELECT TO_DATE('13-01-2026', 'DD-MM-YYYY');
+SELECT TO_TIMESTAMP('04/13/2026 15:30', 'MM/DD/YYYY HH24:MI');
+```
+
+3. In application code, format dates before sending:
+
+```python
+from datetime import date
+# Always send ISO format
+cursor.execute("SELECT * FROM events WHERE created_at > %s", (date(2026, 4, 13).isoformat(),))
+```
+
+### Invalid JSON literal in `jsonb` column
+
+Inserting or casting malformed JSON into a `json` or `jsonb` column triggers 22P02.
+
+```sql
+-- These fail
+SELECT '{ name: "test" }'::jsonb; -- keys must be quoted
+SELECT '{ "name": undefined }'::jsonb; -- undefined is not valid JSON
+SELECT "{ \"name\": \"test\" }"::jsonb; -- double-quoted string, not a literal
+
+-- This works
+SELECT '{ "name": "test" }'::jsonb;
+```
+
+**Fix:**
+
+1. Validate JSON in the application before inserting:
+
+```python
+import json
+
+data = request.get_json()
+try:
+ json_str = json.dumps(data) # ensures valid JSON
+except (TypeError, ValueError) as e:
+ return {"error": f"Invalid JSON: {e}"}, 400
+
+cursor.execute("INSERT INTO configs (data) VALUES (%s::jsonb)", (json_str,))
+```
+
+2. Use `jsonb_typeof` to test if a string is valid JSON:
+
+```sql
+-- Returns NULL for invalid JSON instead of raising an error
+SELECT jsonb_typeof(:input::jsonb);
+```
+
+## Prevention
+
+- Always use parameterized queries with proper type bindings — let the database driver handle type conversion
+- Validate user input at the application boundary before it reaches SQL
+- Use ISO 8601 format (`YYYY-MM-DD`, `YYYY-MM-DDTHH:MM:SS`) for all date and timestamp values
+- For UUID columns, validate format client-side before querying
+- In PostgreSQL 16+, use `pg_input_is_valid()` to test a value against a type before casting
+
+
+
+Bytebase's [SQL Review](https://docs.bytebase.com/sql-review/review-rules/) can enforce column type consistency across your schema, catching potential type mismatches before they cause runtime errors. See also [ERROR 42804: Datatype Mismatch](/reference/postgres/error/42804-datatype-mismatch) for type errors in expressions and assignments.
+
+
From 7b9b99754764cc75992f366590dfa7ee720a3d6b Mon Sep 17 00:00:00 2001
From: adela
Date: Fri, 10 Apr 2026 17:09:07 +0200
Subject: [PATCH 2/2] fix
---
.../error/1205-lock-wait-timeout-exceeded.md | 22 ++------------
.../22p02-invalid-input-syntax-postgres.md | 30 ++++++++++++-------
2 files changed, 23 insertions(+), 29 deletions(-)
diff --git a/content/reference/mysql/error/1205-lock-wait-timeout-exceeded.md b/content/reference/mysql/error/1205-lock-wait-timeout-exceeded.md
index c9eb6a87..7124ebe7 100644
--- a/content/reference/mysql/error/1205-lock-wait-timeout-exceeded.md
+++ b/content/reference/mysql/error/1205-lock-wait-timeout-exceeded.md
@@ -90,28 +90,12 @@ connection.commit()
A single `UPDATE` or `DELETE` affecting thousands of rows locks them all for the duration of the statement. Other transactions waiting for any of those rows will time out.
```sql
--- This locks every row in the table for the entire execution
+-- This locks the rows matched by the WHERE clause for the duration of the statement
UPDATE orders SET status = 'archived' WHERE created_at < '2025-01-01';
--- Could take minutes — every other transaction touching `orders` waits
+-- Could take minutes — other transactions touching those rows may wait
```
-**Fix:** Break the operation into smaller batches:
-
-```sql
--- Process 1000 rows at a time
-SET @batch_size = 1000;
-SET @rows_affected = 1;
-
-WHILE @rows_affected > 0 DO
- UPDATE orders SET status = 'archived'
- WHERE created_at < '2025-01-01' AND status != 'archived'
- LIMIT 1000;
- SET @rows_affected = ROW_COUNT();
- -- Brief pause to let other transactions through
-END WHILE;
-```
-
-Or in application code:
+**Fix:** Break the operation into smaller batches in application code:
```python
batch_size = 1000
diff --git a/content/reference/postgres/error/22p02-invalid-input-syntax-postgres.md b/content/reference/postgres/error/22p02-invalid-input-syntax-postgres.md
index a507c5d0..755eb6ca 100644
--- a/content/reference/postgres/error/22p02-invalid-input-syntax-postgres.md
+++ b/content/reference/postgres/error/22p02-invalid-input-syntax-postgres.md
@@ -24,7 +24,7 @@ PostgreSQL 22P02 fires when a value cannot be parsed into the expected data type
- **String where integer expected** — a query passes text like `'abc'` to an integer column or parameter
- **Malformed UUID** — a string that doesn't match the UUID format `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`
-- **Invalid boolean value** — using `'yes'`/`'no'` instead of PostgreSQL's accepted boolean literals
+- **Invalid boolean value** — using unrecognized strings like `'Y'`, `'active'`, or `'enabled'` instead of PostgreSQL's accepted boolean literals
- **Timestamp format mismatch** — date string doesn't match the expected format or `datestyle` setting
- **Invalid JSON literal** — malformed JSON in a `json` or `jsonb` column
@@ -132,18 +132,19 @@ SELECT * FROM sessions WHERE session_id = try_cast_uuid(:input);
### Invalid boolean value
-PostgreSQL accepts `true`/`false`, `t`/`f`, `yes`/`no`, `on`/`off`, `1`/`0` — but not arbitrary strings.
+PostgreSQL accepts `true`/`false`, `t`/`f`, `y`/`n`, `yes`/`no`, `on`/`off`, `1`/`0` (all case-insensitive) — but not arbitrary strings.
```sql
-- These work
SELECT * FROM users WHERE active = true;
SELECT * FROM users WHERE active = 'yes';
+SELECT * FROM users WHERE active = 'Y';
SELECT * FROM users WHERE active = '1';
-- These fail
-SELECT * FROM users WHERE active = 'Y';
SELECT * FROM users WHERE active = 'active';
SELECT * FROM users WHERE active = 'enabled';
+SELECT * FROM users WHERE active = 'TRUE1';
```
**Fix:**
@@ -151,7 +152,7 @@ SELECT * FROM users WHERE active = 'enabled';
1. Map application values to PostgreSQL booleans:
```python
-bool_map = {'Y': True, 'N': False, 'active': True, 'inactive': False}
+bool_map = {'active': True, 'inactive': False, 'enabled': True, 'disabled': False}
active = bool_map.get(request.args.get('active'))
if active is None:
return {"error": "Invalid boolean value"}, 400
@@ -163,10 +164,10 @@ cursor.execute("SELECT * FROM users WHERE active = %s", (active,))
```sql
SELECT * FROM users
WHERE active = CASE :input
- WHEN 'Y' THEN true
- WHEN 'N' THEN false
WHEN 'active' THEN true
WHEN 'inactive' THEN false
+ WHEN 'enabled' THEN true
+ WHEN 'disabled' THEN false
END;
```
@@ -218,7 +219,7 @@ Inserting or casting malformed JSON into a `json` or `jsonb` column triggers 22P
-- These fail
SELECT '{ name: "test" }'::jsonb; -- keys must be quoted
SELECT '{ "name": undefined }'::jsonb; -- undefined is not valid JSON
-SELECT "{ \"name\": \"test\" }"::jsonb; -- double-quoted string, not a literal
+SELECT '{ "name": "test", }'::jsonb; -- trailing comma is not valid JSON
-- This works
SELECT '{ "name": "test" }'::jsonb;
@@ -240,11 +241,20 @@ except (TypeError, ValueError) as e:
cursor.execute("INSERT INTO configs (data) VALUES (%s::jsonb)", (json_str,))
```
-2. Use `jsonb_typeof` to test if a string is valid JSON:
+2. If you need server-side validation, create a helper function:
```sql
--- Returns NULL for invalid JSON instead of raising an error
-SELECT jsonb_typeof(:input::jsonb);
+CREATE OR REPLACE FUNCTION try_cast_jsonb(text)
+RETURNS jsonb AS $$
+BEGIN
+ RETURN $1::jsonb;
+EXCEPTION WHEN invalid_text_representation THEN
+ RETURN NULL;
+END;
+$$ LANGUAGE plpgsql IMMUTABLE;
+
+-- Returns NULL instead of raising an error
+SELECT try_cast_jsonb('{ invalid }');
```
## Prevention