Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 2 additions & 18 deletions contrib/auto_explain/auto_explain.c
Original file line number Diff line number Diff line change
Expand Up @@ -305,19 +305,9 @@ explain_ExecutorStart(QueryDesc *queryDesc, int eflags)

if (auto_explain_enabled())
{
/*
* Set up to track total elapsed time in ExecutorRun. Make sure the
* space is allocated in the per-query context so it will go away at
* ExecutorEnd.
*/
/* Set up to track total elapsed time in ExecutorRun. */
if (queryDesc->totaltime == NULL)
{
MemoryContext oldcxt;

oldcxt = MemoryContextSwitchTo(queryDesc->estate->es_query_cxt);
queryDesc->totaltime = InstrAlloc(1, INSTRUMENT_ALL, false);
MemoryContextSwitchTo(oldcxt);
}
queryDesc->totaltime = InstrAlloc(INSTRUMENT_ALL);
}
}

Expand Down Expand Up @@ -381,12 +371,6 @@ explain_ExecutorEnd(QueryDesc *queryDesc)
*/
oldcxt = MemoryContextSwitchTo(queryDesc->estate->es_query_cxt);

/*
* Make sure stats accumulation is done. (Note: it's okay if several
* levels of hook all do this.)
*/
InstrEndLoop(queryDesc->totaltime);

/* Log plan if duration is exceeded. */
msec = INSTR_TIME_GET_MILLISEC(queryDesc->totaltime->total);
if (msec >= auto_explain_log_min_duration)
Expand Down
1 change: 1 addition & 0 deletions contrib/meson.build
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@ subdir('pg_logicalinspect')
subdir('pg_overexplain')
subdir('pg_prewarm')
subdir('pgrowlocks')
subdir('pg_session_buffer_usage')
subdir('pg_stat_statements')
subdir('pgstattuple')
subdir('pg_surgery')
Expand Down
23 changes: 23 additions & 0 deletions contrib/pg_session_buffer_usage/Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# contrib/pg_session_buffer_usage/Makefile

# Build the extension as one shared library; $(WIN32RES) contributes the
# Windows version-resource object on that platform (empty elsewhere).
MODULE_big = pg_session_buffer_usage
OBJS = \
$(WIN32RES) \
pg_session_buffer_usage.o

# Extension name plus the install-script that creates its SQL-level
# objects; PGFILEDESC becomes the DLL file description on Windows.
EXTENSION = pg_session_buffer_usage
DATA = pg_session_buffer_usage--1.0.sql
PGFILEDESC = "pg_session_buffer_usage - show buffer usage statistics for the current session"

# pg_regress suite: runs sql/pg_session_buffer_usage.sql and diffs the
# result against expected/pg_session_buffer_usage.out.
REGRESS = pg_session_buffer_usage

# Standard dual-mode build: out-of-tree against an installed server via
# PGXS, or in-tree under contrib/ using the top-level make infrastructure.
# The include lines must stay after the variable assignments above.
ifdef USE_PGXS
PG_CONFIG = pg_config
PGXS := $(shell $(PG_CONFIG) --pgxs)
include $(PGXS)
else
subdir = contrib/pg_session_buffer_usage
top_builddir = ../..
include $(top_builddir)/src/Makefile.global
include $(top_srcdir)/contrib/contrib-global.mk
endif
283 changes: 283 additions & 0 deletions contrib/pg_session_buffer_usage/expected/pg_session_buffer_usage.out
Original file line number Diff line number Diff line change
@@ -0,0 +1,283 @@
LOAD 'pg_session_buffer_usage';
CREATE EXTENSION pg_session_buffer_usage;
-- Verify all columns are non-negative
SELECT count(*) = 1 AS ok FROM pg_session_buffer_usage()
WHERE shared_blks_hit >= 0 AND shared_blks_read >= 0
AND shared_blks_dirtied >= 0 AND shared_blks_written >= 0
AND local_blks_hit >= 0 AND local_blks_read >= 0
AND local_blks_dirtied >= 0 AND local_blks_written >= 0
AND temp_blks_read >= 0 AND temp_blks_written >= 0
AND shared_blk_read_time >= 0 AND shared_blk_write_time >= 0
AND local_blk_read_time >= 0 AND local_blk_write_time >= 0
AND temp_blk_read_time >= 0 AND temp_blk_write_time >= 0;
ok
----
t
(1 row)

-- Verify counters increase after buffer activity
SELECT pg_session_buffer_usage_reset();
pg_session_buffer_usage_reset
-------------------------------

(1 row)

CREATE TEMP TABLE test_buf_activity (id int, data text);
INSERT INTO test_buf_activity SELECT i, repeat('x', 100) FROM generate_series(1, 1000) AS i;
SELECT count(*) FROM test_buf_activity;
count
-------
1000
(1 row)

SELECT local_blks_hit + local_blks_read > 0 AS blocks_increased
FROM pg_session_buffer_usage();
blocks_increased
------------------
t
(1 row)

DROP TABLE test_buf_activity;
-- Parallel query test
CREATE TABLE par_dc_tab (a int, b char(200));
INSERT INTO par_dc_tab SELECT i, repeat('x', 200) FROM generate_series(1, 5000) AS i;
SELECT count(*) FROM par_dc_tab;
count
-------
5000
(1 row)

-- Measure serial scan delta (leader does all the work)
SET max_parallel_workers_per_gather = 0;
SELECT pg_session_buffer_usage_reset();
pg_session_buffer_usage_reset
-------------------------------

(1 row)

SELECT count(*) FROM par_dc_tab;
count
-------
5000
(1 row)

CREATE TEMP TABLE dc_serial_result AS
SELECT shared_blks_hit AS serial_delta FROM pg_session_buffer_usage();
-- Measure parallel scan delta with leader NOT participating in scanning.
-- Workers do all table scanning; leader only runs the Gather node.
SET parallel_setup_cost = 0;
SET parallel_tuple_cost = 0;
SET min_parallel_table_scan_size = 0;
SET max_parallel_workers_per_gather = 2;
SET parallel_leader_participation = off;
SELECT pg_session_buffer_usage_reset();
pg_session_buffer_usage_reset
-------------------------------

(1 row)

SELECT count(*) FROM par_dc_tab;
count
-------
5000
(1 row)

-- Confirm we got a similar hit counter through parallel worker accumulation
SELECT shared_blks_hit > s.serial_delta / 2 AND shared_blks_hit < s.serial_delta * 2
AS leader_buffers_match
FROM pg_session_buffer_usage(), dc_serial_result s;
leader_buffers_match
----------------------
t
(1 row)

RESET parallel_setup_cost;
RESET parallel_tuple_cost;
RESET min_parallel_table_scan_size;
RESET max_parallel_workers_per_gather;
RESET parallel_leader_participation;
DROP TABLE par_dc_tab, dc_serial_result;
--
-- Abort/exception tests: verify buffer usage survives various error paths.
--
-- Rolled-back divide-by-zero under EXPLAIN ANALYZE
CREATE TEMP TABLE exc_tab (a int, b char(20));
SELECT pg_session_buffer_usage_reset();
pg_session_buffer_usage_reset
-------------------------------

(1 row)

EXPLAIN (ANALYZE, BUFFERS, COSTS OFF)
WITH ins AS (INSERT INTO exc_tab VALUES (1, 'aaa') RETURNING a)
SELECT a / 0 FROM ins;
ERROR: division by zero
SELECT local_blks_dirtied > 0 AS exception_buffers_visible
FROM pg_session_buffer_usage();
exception_buffers_visible
---------------------------
t
(1 row)

DROP TABLE exc_tab;
-- Unique constraint violation in regular query
CREATE TEMP TABLE unique_tab (a int UNIQUE, b char(20));
INSERT INTO unique_tab VALUES (1, 'first');
SELECT pg_session_buffer_usage_reset();
pg_session_buffer_usage_reset
-------------------------------

(1 row)

INSERT INTO unique_tab VALUES (1, 'duplicate');
ERROR: duplicate key value violates unique constraint "unique_tab_a_key"
DETAIL: Key (a)=(1) already exists.
SELECT local_blks_hit > 0 AS unique_violation_buffers_visible
FROM pg_session_buffer_usage();
unique_violation_buffers_visible
----------------------------------
t
(1 row)

DROP TABLE unique_tab;
-- Caught exception in PL/pgSQL subtransaction (BEGIN...EXCEPTION)
CREATE TEMP TABLE subxact_tab (a int, b char(20));
CREATE FUNCTION subxact_exc_func() RETURNS text AS $$
BEGIN
BEGIN
EXECUTE 'EXPLAIN (ANALYZE, BUFFERS, COSTS OFF)
WITH ins AS (INSERT INTO subxact_tab VALUES (1, ''aaa'') RETURNING a)
SELECT a / 0 FROM ins';
EXCEPTION WHEN division_by_zero THEN
RETURN 'caught';
END;
RETURN 'not reached';
END;
$$ LANGUAGE plpgsql;
SELECT pg_session_buffer_usage_reset();
pg_session_buffer_usage_reset
-------------------------------

(1 row)

SELECT subxact_exc_func();
subxact_exc_func
------------------
caught
(1 row)

SELECT local_blks_dirtied > 0 AS subxact_buffers_visible
FROM pg_session_buffer_usage();
subxact_buffers_visible
-------------------------
t
(1 row)

DROP FUNCTION subxact_exc_func;
DROP TABLE subxact_tab;
-- Cursor (FOR loop) in aborted subtransaction; verify post-exception tracking
CREATE TEMP TABLE cursor_tab (a int, b char(200));
INSERT INTO cursor_tab SELECT i, repeat('x', 200) FROM generate_series(1, 500) AS i;
CREATE FUNCTION cursor_exc_func() RETURNS text AS $$
DECLARE
rec record;
cnt int := 0;
BEGIN
BEGIN
FOR rec IN SELECT * FROM cursor_tab LOOP
cnt := cnt + 1;
IF cnt = 250 THEN
PERFORM 1 / 0;
END IF;
END LOOP;
EXCEPTION WHEN division_by_zero THEN
RETURN 'caught after ' || cnt || ' rows';
END;
RETURN 'not reached';
END;
$$ LANGUAGE plpgsql;
SELECT pg_session_buffer_usage_reset();
pg_session_buffer_usage_reset
-------------------------------

(1 row)

SELECT cursor_exc_func();
cursor_exc_func
-----------------------
caught after 250 rows
(1 row)

SELECT local_blks_hit + local_blks_read > 0
AS cursor_subxact_buffers_visible
FROM pg_session_buffer_usage();
cursor_subxact_buffers_visible
--------------------------------
t
(1 row)

DROP FUNCTION cursor_exc_func;
DROP TABLE cursor_tab;
-- Parallel worker abort: worker buffer activity is currently NOT propagated on abort.
--
-- When a parallel worker aborts, InstrEndParallelQuery and
-- ExecParallelReportInstrumentation never run, so the worker's buffer
-- activity is never written to shared memory, despite the information having been
-- captured by the res owner release instrumentation handling.
CREATE TABLE par_abort_tab (a int, b char(200));
INSERT INTO par_abort_tab SELECT i, repeat('x', 200) FROM generate_series(1, 5000) AS i;
-- Warm shared buffers so all reads become hits
SELECT count(*) FROM par_abort_tab;
count
-------
5000
(1 row)

-- Measure serial scan delta as a reference (leader reads all blocks)
SET max_parallel_workers_per_gather = 0;
SELECT pg_session_buffer_usage_reset();
pg_session_buffer_usage_reset
-------------------------------

(1 row)

SELECT b::int2 FROM par_abort_tab WHERE a > 1000;
ERROR: invalid input syntax for type smallint: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
CREATE TABLE par_abort_serial_result AS
SELECT shared_blks_hit AS serial_delta FROM pg_session_buffer_usage();
-- Now force parallel with leader NOT participating in scanning
SET parallel_setup_cost = 0;
SET parallel_tuple_cost = 0;
SET min_parallel_table_scan_size = 0;
SET max_parallel_workers_per_gather = 2;
SET parallel_leader_participation = off;
SET debug_parallel_query = on; -- Ensure we get CONTEXT line consistently
SELECT pg_session_buffer_usage_reset();
pg_session_buffer_usage_reset
-------------------------------

(1 row)

SELECT b::int2 FROM par_abort_tab WHERE a > 1000;
ERROR: invalid input syntax for type smallint: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
CONTEXT: parallel worker
RESET debug_parallel_query;
-- Workers scanned the table but aborted before reporting stats back.
-- The leader's delta should be much less than a serial scan, documenting
-- that worker buffer activity is lost on abort.
SELECT shared_blks_hit < s.serial_delta / 2
AS worker_abort_buffers_not_propagated
FROM pg_session_buffer_usage(), par_abort_serial_result s;
worker_abort_buffers_not_propagated
-------------------------------------
t
(1 row)

RESET parallel_setup_cost;
RESET parallel_tuple_cost;
RESET min_parallel_table_scan_size;
RESET max_parallel_workers_per_gather;
RESET parallel_leader_participation;
DROP TABLE par_abort_tab, par_abort_serial_result;
-- Cleanup
DROP EXTENSION pg_session_buffer_usage;
34 changes: 34 additions & 0 deletions contrib/pg_session_buffer_usage/meson.build
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
# Copyright (c) 2026, PostgreSQL Global Development Group

# Source list for the extension's shared module.
pg_session_buffer_usage_sources = files(
'pg_session_buffer_usage.c',
)

# On Windows, attach a generated version resource so the DLL carries a
# name and file description (mirrors PGFILEDESC in the Makefile build).
if host_system == 'windows'
pg_session_buffer_usage_sources += rc_lib_gen.process(win32ver_rc, extra_args: [
'--NAME', 'pg_session_buffer_usage',
'--FILEDESC', 'pg_session_buffer_usage - show buffer usage statistics for the current session',])
endif

# Build the loadable module with the common contrib module arguments and
# register it with the aggregate contrib build target.
pg_session_buffer_usage = shared_module('pg_session_buffer_usage',
pg_session_buffer_usage_sources,
kwargs: contrib_mod_args,
)
contrib_targets += pg_session_buffer_usage

# Install the extension script and control file alongside the module.
install_data(
'pg_session_buffer_usage--1.0.sql',
'pg_session_buffer_usage.control',
kwargs: contrib_data_args,
)

# Register the pg_regress suite; 'sql' names the script under sql/, with
# expected output under expected/ in the source directory ('sd').
tests += {
'name': 'pg_session_buffer_usage',
'sd': meson.current_source_dir(),
'bd': meson.current_build_dir(),
'regress': {
'sql': [
'pg_session_buffer_usage',
],
},
}
Loading