#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/kvm_host.h>
#include <linux/pagemap.h>
#include <linux/anon_inodes.h>
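/*
 * Per-file guest_memfd state: the owning VM, an xarray mapping file offsets
 * to the memslots bound to them, and this file's entry on the inode's
 * i_private_list.
 */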
struct kvm_gmem {
	struct kvm *kvm;
	struct xarray bindings;
	struct list_head entry;
};
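/*
 * kvm_gmem_get_folio(): look up (or allocate) the folio backing @index in the
 * guest_memfd inode, zeroing newly allocated memory before it can be handed
 * to the guest.
 */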
	folio = filemap_grab_folio(inode->i_mapping, index);
	if (IS_ERR_OR_NULL(folio))
		return NULL;

	if (!folio_test_uptodate(folio)) {
		unsigned long nr_pages = folio_nr_pages(folio);
		unsigned long i;

		for (i = 0; i < nr_pages; i++)
			clear_highpage(folio_page(folio, i));

		folio_mark_uptodate(folio);
	}
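/*
 * kvm_gmem_invalidate_begin(): for every memslot bound to [start, end) in this
 * file, unmap the affected gfn range from KVM's page tables, flushing remote
 * TLBs if anything was zapped.
 */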
	bool flush = false, found_memslot = false;
	struct kvm_memory_slot *slot;
	struct kvm *kvm = gmem->kvm;
	unsigned long index;

	xa_for_each_range(&gmem->bindings, index, slot, start, end - 1) {
		pgoff_t pgoff = slot->gmem.pgoff;

		struct kvm_gfn_range gfn_range = {
			.start = slot->base_gfn + max(pgoff, start) - pgoff,
			.end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff,
			.slot = slot,
			.may_block = true,
		};

		if (!found_memslot) {
			found_memslot = true;

			KVM_MMU_LOCK(kvm);
			kvm_mmu_invalidate_begin(kvm);
		}

		flush |= kvm_mmu_unmap_gfn_range(kvm, &gfn_range);
	}

	if (flush)
		kvm_flush_remote_tlbs(kvm);

	if (found_memslot)
		KVM_MMU_UNLOCK(kvm);
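/*
 * kvm_gmem_invalidate_end(): complete the invalidation started above if any
 * binding intersects [start, end).
 */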
	struct kvm *kvm = gmem->kvm;

	if (xa_find(&gmem->bindings, &start, end - 1, XA_PRESENT)) {
		KVM_MMU_LOCK(kvm);
		kvm_mmu_invalidate_end(kvm);
		KVM_MMU_UNLOCK(kvm);
	}
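/*
 * kvm_gmem_punch_hole(): drop the pages backing [offset, offset + len) and
 * invalidate any gfn ranges bound to them.  Bindings must stay stable across
 * the invalidation so that the begin/end calls are balanced.
 */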
	struct list_head *gmem_list = &inode->i_mapping->i_private_list;
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	struct kvm_gmem *gmem;

	filemap_invalidate_lock(inode->i_mapping);

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_begin(gmem, start, end);

	truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1);

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_end(gmem, start, end);

	filemap_invalidate_unlock(inode->i_mapping);
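/*
 * kvm_gmem_allocate(): preallocate and zero folios for [offset, offset + len),
 * bailing out early if a signal is pending.
 */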
	struct address_space *mapping = inode->i_mapping;
	pgoff_t start, index, end;
	int r = 0;

	if (offset + len > i_size_read(inode))
		return -EINVAL;

	filemap_invalidate_lock_shared(mapping);

	start = offset >> PAGE_SHIFT;
	end = (offset + len) >> PAGE_SHIFT;

	for (index = start; index < end; ) {
		struct folio *folio;

		if (signal_pending(current)) {
			r = -EINTR;
			break;
		}

		folio = kvm_gmem_get_folio(inode, index);
		if (!folio) {
			r = -ENOMEM;
			break;
		}

		index = folio_next_index(folio);
		folio_unlock(folio);
		folio_put(folio);

		if (WARN_ON_ONCE(!index))
			break;
	}

	filemap_invalidate_unlock_shared(mapping);

	return r;
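/*
 * kvm_gmem_fallocate(): only page-aligned preallocation and hole punching
 * (both with FALLOC_FL_KEEP_SIZE) are supported.
 */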
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!PAGE_ALIGNED(offset) || !PAGE_ALIGNED(len))
		return -EINVAL;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return kvm_gmem_punch_hole(file_inode(file), offset, len);

	return kvm_gmem_allocate(file_inode(file), offset, len);
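/*
 * kvm_gmem_release(): final fput() of the guest_memfd file.  Clear every
 * binding's file pointer, zap anything still mapped, and drop the VM
 * reference taken at creation.
 */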
	struct kvm_gmem *gmem = file->private_data;
	struct kvm_memory_slot *slot;
	struct kvm *kvm = gmem->kvm;
	unsigned long index;

	mutex_lock(&kvm->slots_lock);

	filemap_invalidate_lock(inode->i_mapping);

	xa_for_each(&gmem->bindings, index, slot)
		rcu_assign_pointer(slot->gmem.file, NULL);

	synchronize_rcu();

	kvm_gmem_invalidate_begin(gmem, 0, -1ul);
	kvm_gmem_invalidate_end(gmem, 0, -1ul);

	list_del(&gmem->entry);

	filemap_invalidate_unlock(inode->i_mapping);

	mutex_unlock(&kvm->slots_lock);

	xa_destroy(&gmem->bindings);
	kfree(gmem);

	kvm_put_kvm(kvm);

	return 0;
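/*
 * kvm_gmem_get_file(): grab a reference to the bound file, or NULL if the
 * file has already been closed (kvm_gmem_release() may not have cleared the
 * slot's pointer yet).
 */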
	return get_file_active(&slot->gmem.file);
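/* File operations for guest_memfd files. */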
static struct file_operations kvm_gmem_fops = {
	.open		= generic_file_open,
	.release	= kvm_gmem_release,
	.fallocate	= kvm_gmem_fallocate,
};
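/*
 * kvm_gmem_migrate_folio(): guest_memfd mappings are marked unmovable, so
 * folio migration is never expected here.
 */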
static int kvm_gmem_migrate_folio(struct address_space *mapping,
				  struct folio *dst, struct folio *src,
				  enum migrate_mode mode)
{
	WARN_ON_ONCE(1);
	return -EINVAL;
}
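/*
 * kvm_gmem_error_folio(): memory-failure handling.  Invalidate the gfn ranges
 * backed by the poisoned folio without truncating it; the error is surfaced
 * the next time the gfn is mapped via kvm_gmem_get_pfn().
 */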
	struct list_head *gmem_list = &mapping->i_private_list;
	struct kvm_gmem *gmem;
	pgoff_t start, end;

	filemap_invalidate_lock_shared(mapping);

	start = folio->index;
	end = start + folio_nr_pages(folio);

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_begin(gmem, start, end);

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_end(gmem, start, end);

	filemap_invalidate_unlock_shared(mapping);

	return MF_DELAYED;
static const struct address_space_operations kvm_gmem_aops = {
	.dirty_folio		= noop_dirty_folio,
	.migrate_folio		= kvm_gmem_migrate_folio,
	.error_remove_folio	= kvm_gmem_error_folio,
};
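/* kvm_gmem_getattr(): report the inode's generic attributes. */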
static int kvm_gmem_getattr(struct mnt_idmap *idmap, const struct path *path,
			    struct kstat *stat, u32 request_mask,
			    unsigned int query_flags)
{
	struct inode *inode = path->dentry->d_inode;

	generic_fillattr(idmap, request_mask, inode, stat);
	return 0;
}
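/*
 * __kvm_gmem_create(): allocate the anonymous inode/file pair backing a
 * guest_memfd, configure its mapping as unmovable and unevictable, and return
 * the installed file descriptor.
 */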
	const char *anon_name = "[kvm-gmem]";
	struct kvm_gmem *gmem;
	struct inode *inode;
	struct file *file;
	int fd, err;

	fd = get_unused_fd_flags(0);
	if (fd < 0)
		return fd;

	gmem = kzalloc(sizeof(*gmem), GFP_KERNEL);
	if (!gmem) {
		err = -ENOMEM;
		goto err_fd;
	}

	file = anon_inode_create_getfile(anon_name, &kvm_gmem_fops, gmem,
					 O_RDWR, NULL);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto err_gmem;
	}

	file->f_flags |= O_LARGEFILE;

	inode = file->f_inode;
	WARN_ON(file->f_mapping != inode->i_mapping);

	inode->i_private = (void *)(unsigned long)flags;
	inode->i_op = &kvm_gmem_iops;
	inode->i_mapping->a_ops = &kvm_gmem_aops;
	inode->i_mode |= S_IFREG;
	inode->i_size = size;
	mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
	mapping_set_unmovable(inode->i_mapping);
	/* Unmovable mappings are supposed to be marked unevictable as well. */
	WARN_ON_ONCE(!mapping_unevictable(inode->i_mapping));

	kvm_get_kvm(kvm);
	gmem->kvm = kvm;
	xa_init(&gmem->bindings);
	list_add(&gmem->entry, &inode->i_mapping->i_private_list);

	fd_install(fd, file);
	return fd;

err_gmem:
	kfree(gmem);
err_fd:
	put_unused_fd(fd);
	return err;
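/*
 * kvm_gmem_create(): KVM_CREATE_GUEST_MEMFD entry point; validate args->flags
 * and args->size before creating the file.
 */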
	loff_t size = args->size;
	u64 flags = args->flags;
	u64 valid_flags = 0;

	if (flags & ~valid_flags)
		return -EINVAL;

	if (size <= 0 || !PAGE_ALIGNED(size))
		return -EINVAL;

	return __kvm_gmem_create(kvm, size, flags);
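/*
 * kvm_gmem_bind(): attach a guest_memfd range to a memslot, rejecting offsets
 * that are misaligned, out of bounds, or already bound.
 */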
int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
		  unsigned int fd, loff_t offset)
{
	loff_t size = slot->npages << PAGE_SHIFT;
	unsigned long start, end;
	struct kvm_gmem *gmem;
	struct inode *inode;
	struct file *file;
	int r = -EINVAL;

	BUILD_BUG_ON(sizeof(gfn_t) != sizeof(slot->gmem.pgoff));

	file = fget(fd);
	if (!file)
		return -EBADF;

	if (file->f_op != &kvm_gmem_fops)
		goto err;

	gmem = file->private_data;
	if (gmem->kvm != kvm)
		goto err;

	inode = file_inode(file);

	if (offset < 0 || !PAGE_ALIGNED(offset) ||
	    offset + size > i_size_read(inode))
		goto err;

	filemap_invalidate_lock(inode->i_mapping);

	start = offset >> PAGE_SHIFT;
	end = start + slot->npages;

	if (!xa_empty(&gmem->bindings) &&
	    xa_find(&gmem->bindings, &start, end - 1, XA_PRESENT)) {
		filemap_invalidate_unlock(inode->i_mapping);
		goto err;
	}

	rcu_assign_pointer(slot->gmem.file, file);
	slot->gmem.pgoff = start;

	xa_store_range(&gmem->bindings, start, end - 1, slot, GFP_KERNEL);
	filemap_invalidate_unlock(inode->i_mapping);

	/* The file pins the VM, not vice versa; drop the reference even on success. */
	r = 0;
err:
	fput(file);
	return r;
}
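/*
 * kvm_gmem_unbind(): detach the memslot from its guest_memfd range; nothing
 * to do if the file has already been closed.
 */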
	unsigned long start = slot->gmem.pgoff;
	unsigned long end = start + slot->npages;
	struct kvm_gmem *gmem;
	struct file *file;

	file = kvm_gmem_get_file(slot);
	if (!file)
		return;

	gmem = file->private_data;

	filemap_invalidate_lock(file->f_mapping);
	xa_store_range(&gmem->bindings, start, end - 1, NULL, GFP_KERNEL);
	rcu_assign_pointer(slot->gmem.file, NULL);
	synchronize_rcu();
	filemap_invalidate_unlock(file->f_mapping);

	fput(file);
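/*
 * kvm_gmem_get_pfn(): resolve a gfn in a guest_memfd-backed memslot to a pfn,
 * allocating and zeroing the backing folio if needed.
 */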
int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
		     gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
{
	pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
	struct kvm_gmem *gmem;
	struct folio *folio;
	struct page *page;
	struct file *file;
	int r;

	file = kvm_gmem_get_file(slot);
	if (!file)
		return -EFAULT;

	gmem = file->private_data;
	if (WARN_ON_ONCE(xa_load(&gmem->bindings, index) != slot)) {
		r = -EIO;
		goto out_fput;
	}

	folio = kvm_gmem_get_folio(file_inode(file), index);
	if (!folio) {
		r = -ENOMEM;
		goto out_fput;
	}

	if (folio_test_hwpoison(folio)) {
		r = -EHWPOISON;
		goto out_unlock;
	}

	page = folio_file_page(folio, index);

	*pfn = page_to_pfn(page);
	if (max_order)
		*max_order = 0;
	r = 0;

out_unlock:
	folio_unlock(folio);
out_fput:
	fput(file);
	return r;
}
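/* Signatures of the functions, ops structures, and KVM symbols used above: */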
int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args)
static int kvm_gmem_migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode)
static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start, pgoff_t end)
static struct file *kvm_gmem_get_file(struct kvm_memory_slot *slot)
static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags)
static const struct address_space_operations kvm_gmem_aops
static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start, pgoff_t end)
int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
static long kvm_gmem_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
EXPORT_SYMBOL_GPL(kvm_gmem_get_pfn)
void kvm_gmem_init(struct module *module)
static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
static int kvm_gmem_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags)
static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
static long kvm_gmem_allocate(struct inode *inode, loff_t offset, loff_t len)
void kvm_gmem_unbind(struct kvm_memory_slot *slot)
static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *folio)
static struct file_operations kvm_gmem_fops
int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned int fd, loff_t offset)
static int kvm_gmem_release(struct inode *inode, struct file *file)
static int kvm_gmem_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr)
static const struct inode_operations kvm_gmem_iops
void kvm_put_kvm(struct kvm *kvm)
void kvm_flush_remote_tlbs(struct kvm *kvm)
void kvm_get_kvm(struct kvm *kvm)
#define KVM_MMU_LOCK(kvm)
#define KVM_MMU_UNLOCK(kvm)