From 20d5664795801f21e74cda4f25fd2bfb62e6d2de Mon Sep 17 00:00:00 2001 From: Your Name <you@example.com> Date: Fri, 6 Mar 2026 09:48:40 -0800 Subject: [PATCH] file-operations: throttle page cache during cross-device copies MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When copying large files to a slow device (e.g. USB), GLib's splice() fills the page cache at RAM speed. Once dirty pages exceed the kernel's dirty_ratio the kernel blocks all writers, making Nemo (and the whole desktop) appear frozen for minutes. Fix this by periodically calling sync_file_range() + posix_fadvise() from the copy progress callback every 32 MiB: - SYNC_FILE_RANGE_WAIT_BEFORE: provides natural back-pressure by waiting for the previous chunk's writeback to finish, pacing the copy to actual device speed. - SYNC_FILE_RANGE_WRITE: starts async writeback for the new chunk. - POSIX_FADV_DONTNEED: drops pages whose writeback already completed, keeping memory usage bounded. This keeps dirty pages at ~32-64 MiB instead of many gigabytes, the progress bar advances at the real device speed, and no single syscall blocks for more than one 32 MiB chunk. Only affects cross-filesystem copies of regular files (the common fast-source → slow-USB case). Same-filesystem moves and copies are unaffected. 
Fixes: https://github.com/linuxmint/nemo/issues/3710 --- libnemo-private/nemo-file-operations.c | 76 ++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git a/libnemo-private/nemo-file-operations.c b/libnemo-private/nemo-file-operations.c index 9c3309441..43dcd944e 100644 --- a/libnemo-private/nemo-file-operations.c +++ b/libnemo-private/nemo-file-operations.c @@ -26,6 +26,10 @@ Pavel Cisler */ +#ifndef _GNU_SOURCE +#define _GNU_SOURCE /* for sync_file_range() */ +#endif + #include #include #include @@ -34,6 +38,7 @@ #include #include #include +#include <fcntl.h> #include #include @@ -4169,11 +4174,22 @@ remove_target_recursively (CommonJob *job, } +/* Interval (in bytes) for periodic page cache flushing during + * cross-filesystem copies. Every FLUSH_CHUNK_SIZE bytes, the progress + * callback initiates async writeback and drops clean pages so that dirty + * pages never accumulate enough to hit the kernel's dirty_ratio limit. + * See: https://github.com/linuxmint/nemo/issues/3710 */ +#define FLUSH_CHUNK_SIZE (32 * 1024 * 1024) /* 32 MiB */ + typedef struct { CopyMoveJob *job; goffset last_size; SourceInfo *source_info; TransferInfo *transfer_info; + /* Page cache flush state for cross-filesystem copies */ + GFile *dest; /* dest file, or NULL to skip flushing */ + int dest_fd; /* lazily opened O_RDONLY fd for dest */ + goffset last_flush_offset; /* last offset where we kicked writeback */ } ProgressData; static void @@ -4195,6 +4211,46 @@ copy_file_progress_callback (goffset current_num_bytes, pdata->source_info, pdata->transfer_info); } + + /* Periodically flush dest pages to prevent dirty page accumulation + * on slow devices (e.g. USB). Without this, splice() fills the + * page cache at RAM speed and the kernel hits dirty_ratio, blocking + * all writers for minutes. + * + * SYNC_FILE_RANGE_WAIT_BEFORE waits for any previously submitted + * writeback to complete (natural back-pressure that paces the copy + * to the device speed). 
SYNC_FILE_RANGE_WRITE then starts async + * writeback for the current chunk. FADV_DONTNEED drops pages whose + * writeback has already completed. */ + if (pdata->dest != NULL && + current_num_bytes - pdata->last_flush_offset >= FLUSH_CHUNK_SIZE) { + if (pdata->dest_fd < 0) { + char *path = g_file_get_path (pdata->dest); + if (path != NULL) { + pdata->dest_fd = open (path, O_RDONLY); + g_free (path); + } + } + + if (pdata->dest_fd >= 0) { + goffset flush_end = current_num_bytes; + + sync_file_range (pdata->dest_fd, + pdata->last_flush_offset, + flush_end - pdata->last_flush_offset, + SYNC_FILE_RANGE_WAIT_BEFORE | + SYNC_FILE_RANGE_WRITE); + + /* Drop pages from ranges whose writeback is likely done */ + if (pdata->last_flush_offset > 0) { + posix_fadvise (pdata->dest_fd, 0, + pdata->last_flush_offset, + POSIX_FADV_DONTNEED); + } + } + + pdata->last_flush_offset = current_num_bytes; + } } static gboolean @@ -4544,6 +4600,12 @@ copy_move_file (CopyMoveJob *copy_job, pdata.source_info = source_info; pdata.transfer_info = transfer_info; + /* Enable incremental page cache flushing for cross-filesystem copies + * of regular files (not moves, which are same-fs renames). */ + pdata.dest = (!copy_job->is_move && !same_fs) ? dest : NULL; + pdata.dest_fd = -1; + pdata.last_flush_offset = 0; + if (copy_job->is_move) { res = g_file_move (src, dest, flags, @@ -4560,6 +4622,20 @@ copy_move_file (CopyMoveJob *copy_job, &error); } + /* Clean up the fd the progress callback may have opened. + * On success, do a final async writeback + page cache drop. + * On failure, just close. */ + if (pdata.dest_fd >= 0) { + if (res) { + sync_file_range (pdata.dest_fd, 0, 0, + SYNC_FILE_RANGE_WRITE); + posix_fadvise (pdata.dest_fd, 0, 0, + POSIX_FADV_DONTNEED); + } + close (pdata.dest_fd); + pdata.dest_fd = -1; + } + if (res) { if (!copy_job->is_move) { /* Ignore errors here. Failure to copy metadata is not a hard error */