/*
 * Compatibility functions which bloat the callers too much to make inline.
 * All of the callers of these functions should be converted to use folios
 * eventually.
 */

#include <linux/migrate.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include "internal.h"

struct address_space *page_mapping(struct page *page)
{
	return folio_mapping(page_folio(page));
}
EXPORT_SYMBOL(page_mapping);

void unlock_page(struct page *page)
{
	return folio_unlock(page_folio(page));
}
EXPORT_SYMBOL(unlock_page);

void end_page_writeback(struct page *page)
{
	return folio_end_writeback(page_folio(page));
}
EXPORT_SYMBOL(end_page_writeback);

void wait_on_page_writeback(struct page *page)
{
	return folio_wait_writeback(page_folio(page));
}
EXPORT_SYMBOL_GPL(wait_on_page_writeback);

void wait_for_stable_page(struct page *page)
{
	return folio_wait_stable(page_folio(page));
}
EXPORT_SYMBOL_GPL(wait_for_stable_page);

void mark_page_accessed(struct page *page)
{
	folio_mark_accessed(page_folio(page));
}
EXPORT_SYMBOL(mark_page_accessed);

bool set_page_writeback(struct page *page)
{
	return folio_start_writeback(page_folio(page));
}
EXPORT_SYMBOL(set_page_writeback);

bool set_page_dirty(struct page *page)
{
	return folio_mark_dirty(page_folio(page));
}
EXPORT_SYMBOL(set_page_dirty);

int __set_page_dirty_nobuffers(struct page *page)
{
	return filemap_dirty_folio(page_mapping(page), page_folio(page));
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

bool clear_page_dirty_for_io(struct page *page)
{
	return folio_clear_dirty_for_io(page_folio(page));
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

bool redirty_page_for_writepage(struct writeback_control *wbc,
		struct page *page)
{
	return folio_redirty_for_writepage(wbc, page_folio(page));
}
EXPORT_SYMBOL(redirty_page_for_writepage);

void lru_cache_add_inactive_or_unevictable(struct page *page,
		struct vm_area_struct *vma)
{
	folio_add_lru_vma(page_folio(page), vma);
}

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
		pgoff_t index, gfp_t gfp)
{
	return filemap_add_folio(mapping, page_folio(page), index, gfp);
}
EXPORT_SYMBOL(add_to_page_cache_lru);

noinline
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
		int fgp_flags, gfp_t gfp)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index, fgp_flags, gfp);
	if (IS_ERR(folio))
		return NULL;
	return folio_file_page(folio, index);
}
EXPORT_SYMBOL(pagecache_get_page);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
		pgoff_t index)
{
	return pagecache_get_page(mapping, index, FGP_WRITEBEGIN,
			mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(grab_cache_page_write_begin);

bool isolate_lru_page(struct page *page)
{
	if (WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"))
		return false;
	return folio_isolate_lru((struct folio *)page);
}

void putback_lru_page(struct page *page)
{
	folio_putback_lru(page_folio(page));
}

#ifdef CONFIG_MMU
void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma,
		unsigned long address)
{
	VM_BUG_ON_PAGE(PageTail(page), page);

	return folio_add_new_anon_rmap((struct folio *)page, vma, address);
}
#endif
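
As the header comment says, each helper above only converts its struct page argument to a folio and forwards the call; callers are meant to migrate to the folio API directly so the wrapper (and the page_folio() conversion on every call) can eventually be removed. A minimal conversion sketch is below; the example_finish_read_*() functions are hypothetical callers invented for illustration, not code from this file, and assume the usual kernel headers.

#include <linux/pagemap.h>

/* Before: the caller works on a struct page through the compat wrapper. */
static void example_finish_read_old(struct page *page)
{
	SetPageUptodate(page);
	unlock_page(page);		/* compat wrapper around folio_unlock() */
}

/* After: the caller is converted to operate on the folio directly. */
static void example_finish_read_new(struct folio *folio)
{
	folio_mark_uptodate(folio);
	folio_unlock(folio);
}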