// SPDX-License-Identifier: MIT
/*
 * Copyright © 2024 Intel Corporation
 */

#include <drm/ttm/ttm_backup.h>
#include <linux/page-flags.h>
#include <linux/swap.h>

/*
 * Need to map shmem indices to handles since a handle value
 * of 0 means error, following the swp_entry_t convention.
 */
static unsigned long ttm_backup_shmem_idx_to_handle(pgoff_t idx)
{
	return (unsigned long)idx + 1;
}

static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle)
{
	return handle - 1;
}
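
/*
 * For example, shmem index 0 maps to handle 1 and index 7 maps to
 * handle 8, so a valid handle is never 0 and cannot be mistaken for
 * the error encoding used by callers.
 */
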
/**
 * ttm_backup_drop() - release memory associated with a handle
 * @backup: The struct backup pointer used to obtain the handle.
 * @handle: The handle obtained from ttm_backup_backup_page().
 */
void ttm_backup_drop(struct file *backup, pgoff_t handle)
{
	loff_t start = ttm_backup_handle_to_shmem_idx(handle);

	start <<= PAGE_SHIFT;
	shmem_truncate_range(file_inode(backup), start,
			     start + PAGE_SIZE - 1);
}
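
/*
 * A minimal usage sketch: a driver that no longer needs a backed-up
 * copy (for instance because the buffer object is being destroyed)
 * can release the shmem space behind each stored handle. The my_bo
 * struct and its backup_handles[] array are hypothetical driver-side
 * names, not part of this interface:
 *
 *	for (i = 0; i < my_bo->num_pages; ++i)
 *		if (my_bo->backup_handles[i])
 *			ttm_backup_drop(my_bo->backup,
 *					my_bo->backup_handles[i]);
 */
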
/**
 * ttm_backup_copy_page() - Copy the contents of a previously backed-up
 * page
 * @backup: The struct backup pointer used to back up the page.
 * @dst: The struct page to copy into.
 * @handle: The handle returned when the page was backed up.
 * @intr: Try to perform waits interruptibly or at least killably.
 *
 * Return: 0 on success, negative error code on failure, notably
 * -EINTR if @intr was set to true and a signal is pending.
 */
int ttm_backup_copy_page(struct file *backup, struct page *dst,
			 pgoff_t handle, bool intr)
{
	struct address_space *mapping = backup->f_mapping;
	struct folio *from_folio;
	pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle);

	from_folio = shmem_read_folio(mapping, idx);
	if (IS_ERR(from_folio))
		return PTR_ERR(from_folio);

	copy_highpage(dst, folio_file_page(from_folio, idx));
	folio_put(from_folio);

	return 0;
}
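
/*
 * A minimal restore sketch, assuming the driver stored the handle
 * returned by ttm_backup_backup_page(). The names my_bo, backup and
 * backup_handles[] are hypothetical. On success the backed-up copy is
 * typically dropped, since the contents now live in the destination
 * page again:
 *
 *	ret = ttm_backup_copy_page(my_bo->backup, page,
 *				   my_bo->backup_handles[i], true);
 *	if (!ret)
 *		ttm_backup_drop(my_bo->backup, my_bo->backup_handles[i]);
 */
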
/**
 * ttm_backup_backup_page() - Back up a page
 * @backup: The struct backup pointer to use.
 * @page: The page to back up.
 * @writeback: Whether to perform immediate writeback of the page.
 * This may have performance implications.
 * @idx: A unique integer for each page and each struct backup.
 * This allows the backup implementation to avoid managing
 * its address space separately.
 * @page_gfp: The gfp value used when the page was allocated.
 * This is used for accounting purposes.
 * @alloc_gfp: The gfp to be used when allocating memory.
 *
 * Context: If called from reclaim context, the caller needs to
 * assert that the shrinker gfp has __GFP_FS set, to avoid
 * deadlocking on lock_page(). If @writeback is set to true and
 * called from reclaim context, the caller also needs to assert
 * that the shrinker gfp has __GFP_IO set, since without it,
 * we're not allowed to start backup IO.
 *
 * Return: A handle on success. Negative error code on failure.
 *
 * Note: This function could be extended to back up a folio and
 * implementations would then split the folio internally if needed.
 * The drawback is that the caller would then have to keep track of
 * the folio size and usage.
 */
s64
ttm_backup_backup_page(struct file *backup, struct page *page,
		       bool writeback, pgoff_t idx, gfp_t page_gfp,
		       gfp_t alloc_gfp)
{
	struct address_space *mapping = backup->f_mapping;
	unsigned long handle = 0;
	struct folio *to_folio;
	int ret;

	to_folio = shmem_read_folio_gfp(mapping, idx, alloc_gfp);
	if (IS_ERR(to_folio))
		return PTR_ERR(to_folio);

	folio_mark_accessed(to_folio);
	folio_lock(to_folio);
	folio_mark_dirty(to_folio);
	copy_highpage(folio_file_page(to_folio, idx), page);
	handle = ttm_backup_shmem_idx_to_handle(idx);

	if (writeback && !folio_mapped(to_folio) &&
	    folio_clear_dirty_for_io(to_folio)) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
		};
		folio_set_reclaim(to_folio);
		ret = shmem_writeout(to_folio, &wbc);
		if (!folio_test_writeback(to_folio))
			folio_clear_reclaim(to_folio);
		/*
		 * If writeout succeeds, it unlocks the folio. Errors
		 * are otherwise dropped, since writeout is only best
		 * effort here.
		 */
		if (ret)
			folio_unlock(to_folio);
	} else {
		folio_unlock(to_folio);
	}

	folio_put(to_folio);

	return handle;
}
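
/*
 * A minimal backup sketch, for example from a driver shrinker. The
 * my_bo names are hypothetical, and the gfp values are illustrative;
 * they must satisfy the Context requirements in the kernel-doc above
 * (__GFP_FS, and __GFP_IO when @writeback is true, allowed in reclaim
 * context):
 *
 *	s64 handle;
 *
 *	handle = ttm_backup_backup_page(my_bo->backup, page, true, i,
 *					my_bo->page_gfp, GFP_KERNEL);
 *	if (handle < 0)
 *		return handle;
 *	my_bo->backup_handles[i] = handle;
 */
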
/**
 * ttm_backup_fini() - Free the struct backup resources after last use.
 * @backup: Pointer to the struct backup whose resources to free.
 *
 * After a call to this function, it's illegal to use the @backup pointer.
 */
void ttm_backup_fini(struct file *backup)
{
	fput(backup);
}

/**
 * ttm_backup_bytes_avail() - Report the approximate number of bytes of
 * backup space available.
 *
 * This function is also intended for driver use, to determine whether a
 * backup attempt is meaningful.
 *
 * Return: An approximate size of backup space available.
 */
u64 ttm_backup_bytes_avail(void)
{
	/*
	 * The idea behind backing up to shmem is that shmem objects may
	 * eventually be swapped out. So there is no point in backing up if
	 * there is little or no swap space available. But the accuracy of
	 * this number also depends on shmem actually swapping out backed-up
	 * shmem objects without too much buffering.
	 */
	return (u64)get_nr_swap_pages() << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail);
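
/*
 * Drivers may, for example, skip a backup attempt entirely when there
 * is clearly not enough swap space left. The my_bo name and the
 * comparison against the object size are illustrative only:
 *
 *	if (ttm_backup_bytes_avail() < my_bo->size)
 *		return -ENOSPC;
 */
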
/**
 * ttm_backup_shmem_create() - Create a shmem-based struct backup.
 * @size: The maximum size (in bytes) to back up.
 *
 * Create a backup utilizing shmem objects.
 *
 * Return: A pointer to a struct file on success,
 * or an error pointer on failure.
 */
struct file *ttm_backup_shmem_create(loff_t size)
{
	return shmem_file_setup("ttm shmem backup", size, 0);
}
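
/*
 * A minimal lifecycle sketch: create a backup file sized for the
 * object, use it with ttm_backup_backup_page()/ttm_backup_copy_page(),
 * and put it with ttm_backup_fini() when the last user is gone. The
 * my_bo names are hypothetical:
 *
 *	my_bo->backup =
 *		ttm_backup_shmem_create((loff_t)my_bo->num_pages << PAGE_SHIFT);
 *	if (IS_ERR(my_bo->backup))
 *		return PTR_ERR(my_bo->backup);
 *	...
 *	ttm_backup_fini(my_bo->backup);
 */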