diff --git a/dkms.conf b/dkms.conf index 38100db..c932564 100644 --- a/dkms.conf +++ b/dkms.conf @@ -13,7 +13,7 @@ BUILT_MODULE_LOCATION[0]="kernel" # where we put it under the kernel modules directory DEST_MODULE_LOCATION[0]="/kernel/../updates/" # how to build it -MAKE[0]="./configure --prefix=/opt/xpmem; make clean; make install" +MAKE[0]="./configure --with-kernelvers=${kernelver} --prefix=/opt/xpmem; make clean; make install" # clean up command CLEAN="make distclean" diff --git a/kernel/xpmem_attach.c b/kernel/xpmem_attach.c index 26ec933..9749533 100644 --- a/kernel/xpmem_attach.c +++ b/kernel/xpmem_attach.c @@ -27,6 +27,12 @@ #include #endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) +# define LOCK_FIELD(_mm) _mm->mmap_lock +#else +# define LOCK_FIELD(_mm) _mm->mmap_sem +#endif + static void xpmem_open_handler(struct vm_area_struct *vma) { @@ -239,9 +245,9 @@ xpmem_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf) ret = xpmem_seg_down_read(seg_tg, seg, 1, 0); if (ret == -EAGAIN) { /* to avoid possible deadlock drop current->mm->mmap_sem */ - up_read(&current->mm->mmap_sem); + up_read(LOCK_FIELD(&current->mm)); ret = xpmem_seg_down_read(seg_tg, seg, 1, 1); - down_read(&current->mm->mmap_sem); + down_read(LOCK_FIELD(&current->mm)); vma_verification_needed = 1; } if (ret != 0) @@ -254,11 +260,11 @@ xpmem_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf) * getting the smaller address first. 
*/ if (current->mm < seg_tg->mm) { - down_read(&seg_tg->mm->mmap_sem); - } else if (!down_read_trylock(&seg_tg->mm->mmap_sem)) { - up_read(&current->mm->mmap_sem); - down_read(&seg_tg->mm->mmap_sem); - down_read(&current->mm->mmap_sem); + down_read(LOCK_FIELD(&seg_tg->mm)); + } else if (!down_read_trylock(LOCK_FIELD(&seg_tg->mm))) { + up_read(LOCK_FIELD(&current->mm)); + down_read(LOCK_FIELD(&seg_tg->mm)); + down_read(LOCK_FIELD(&current->mm)); vma_verification_needed = 1; } seg_tg_mmap_sem_locked = 1; @@ -342,7 +348,7 @@ xpmem_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf) } out: if (seg_tg_mmap_sem_locked) - up_read(&seg_tg->mm->mmap_sem); + up_read(LOCK_FIELD(&seg_tg->mm)); if (att_locked) mutex_unlock(&att->mutex); @@ -495,10 +501,10 @@ xpmem_attach(struct file *file, xpmem_apid_t apid, off_t offset, size_t size, if (flags & MAP_FIXED) { struct vm_area_struct *existing_vma; - down_write(&current->mm->mmap_sem); + down_write(LOCK_FIELD(&current->mm)); existing_vma = find_vma_intersection(current->mm, vaddr, vaddr + size); - up_write(&current->mm->mmap_sem); + up_write(LOCK_FIELD(&current->mm)); for ( ; existing_vma && existing_vma->vm_start < vaddr + size ; existing_vma = existing_vma->vm_next) { if (xpmem_is_vm_ops_set(existing_vma)) { @@ -515,9 +521,9 @@ xpmem_attach(struct file *file, xpmem_apid_t apid, off_t offset, size_t size, } att->at_vaddr = at_vaddr; - down_write(&current->mm->mmap_sem); + down_write(LOCK_FIELD(&current->mm)); vma = find_vma(current->mm, at_vaddr); - up_write(&current->mm->mmap_sem); + up_write(LOCK_FIELD(&current->mm)); vma->vm_private_data = att; vma->vm_flags |= @@ -568,25 +574,25 @@ xpmem_detach(u64 at_vaddr) struct xpmem_attachment *att; struct vm_area_struct *vma; - down_write(&current->mm->mmap_sem); + down_write(LOCK_FIELD(&current->mm)); /* find the corresponding vma */ vma = find_vma(current->mm, at_vaddr); if (!vma || vma->vm_start > at_vaddr) { - up_write(&current->mm->mmap_sem); + up_write(LOCK_FIELD(&current->mm)); return 0; } att = (struct xpmem_attachment *)vma->vm_private_data; if 
(!xpmem_is_vm_ops_set(vma) || att == NULL) { - up_write(&current->mm->mmap_sem); + up_write(LOCK_FIELD(&current->mm)); return -EINVAL; } xpmem_att_ref(att); if (mutex_lock_killable(&att->mutex)) { xpmem_att_deref(att); - up_write(&current->mm->mmap_sem); + up_write(LOCK_FIELD(&current->mm)); return -EINTR; } @@ -597,7 +603,7 @@ xpmem_detach(u64 at_vaddr) mutex_unlock(&att->invalidate_mutex); mutex_unlock(&att->mutex); xpmem_att_deref(att); - up_write(&current->mm->mmap_sem); + up_write(LOCK_FIELD(&current->mm)); return 0; } att->flags |= XPMEM_FLAG_DESTROYING; @@ -612,7 +618,7 @@ xpmem_detach(u64 at_vaddr) xpmem_ap_deref(ap); mutex_unlock(&att->mutex); xpmem_att_deref(att); - up_write(&current->mm->mmap_sem); + up_write(LOCK_FIELD(&current->mm)); return -EACCES; } @@ -631,7 +637,7 @@ xpmem_detach(u64 at_vaddr) /* NTH: drop the current mm semaphore before calling vm_munmap (which will * call down_write on the same semaphore) */ - up_write(&current->mm->mmap_sem); + up_write(LOCK_FIELD(&current->mm)); ret = vm_munmap(vma->vm_start, att->at_size); DBUG_ON(ret != 0); @@ -667,7 +673,7 @@ xpmem_detach_att(struct xpmem_access_permit *ap, struct xpmem_attachment *att) mm = current->mm ? 
current->mm : att->mm; /* must lock mmap_sem before att's sema to prevent deadlock */ - down_write(&mm->mmap_sem); + down_write(LOCK_FIELD(&mm)); mutex_lock(&att->mutex); /* ensure we aren't racing with MMU notifier PTE cleanup */ @@ -676,7 +682,7 @@ xpmem_detach_att(struct xpmem_access_permit *ap, struct xpmem_attachment *att) if (att->flags & XPMEM_FLAG_DESTROYING) { mutex_unlock(&att->invalidate_mutex); mutex_unlock(&att->mutex); - up_write(&mm->mmap_sem); + up_write(LOCK_FIELD(&mm)); return; } att->flags |= XPMEM_FLAG_DESTROYING; @@ -688,7 +694,7 @@ xpmem_detach_att(struct xpmem_access_permit *ap, struct xpmem_attachment *att) if (!vma || vma->vm_start > att->at_vaddr) { DBUG_ON(1); mutex_unlock(&att->mutex); - up_write(&mm->mmap_sem); + up_write(LOCK_FIELD(&mm)); return; } DBUG_ON(!xpmem_is_vm_ops_set(vma)); @@ -707,7 +713,7 @@ xpmem_detach_att(struct xpmem_access_permit *ap, struct xpmem_attachment *att) /* NTH: drop the semaphore and attachment lock before calling vm_munmap */ mutex_unlock(&att->mutex); - up_write(&mm->mmap_sem); + up_write(LOCK_FIELD(&mm)); /* NTH: if the current task does not have a memory descriptor * then there is nothing more to do. the memory mapping should @@ -751,7 +757,7 @@ xpmem_clear_PTEs_of_att(struct xpmem_attachment *att, u64 start, u64 end, } } else { /* Must lock mmap_sem before att's sema to prevent deadlock. 
*/ - down_read(&att->mm->mmap_sem); + down_read(LOCK_FIELD(&att->mm)); mutex_lock(&att->mutex); } @@ -832,7 +838,7 @@ xpmem_clear_PTEs_of_att(struct xpmem_attachment *att, u64 start, u64 end, mutex_unlock(&att->invalidate_mutex); } else { mutex_unlock(&att->mutex); - up_read(&att->mm->mmap_sem); + up_read(LOCK_FIELD(&att->mm)); } } diff --git a/kernel/xpmem_misc.c b/kernel/xpmem_misc.c index a2099b8..9b46661 100644 --- a/kernel/xpmem_misc.c +++ b/kernel/xpmem_misc.c @@ -290,11 +290,11 @@ xpmem_debug_printk_procfs_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { char buf; - + if(copy_from_user(&buf, buffer, 1)) return -EFAULT; - if (buf == '0') + if (buf == '0') xpmem_debug_on = 0; else if (buf == '1') xpmem_debug_on = 1; @@ -315,6 +315,15 @@ xpmem_debug_printk_procfs_open(struct inode *inode, struct file *file) return single_open(file, xpmem_debug_printk_procfs_show, NULL); } +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) +struct proc_ops xpmem_debug_printk_procfs_ops = { + .proc_lseek = seq_lseek, + .proc_read = seq_read, + .proc_write = xpmem_debug_printk_procfs_write, + .proc_open = xpmem_debug_printk_procfs_open, + .proc_release = single_release, +}; +#else struct file_operations xpmem_debug_printk_procfs_ops = { .owner = THIS_MODULE, .llseek = seq_lseek, @@ -323,3 +332,4 @@ struct file_operations xpmem_debug_printk_procfs_ops = { .open = xpmem_debug_printk_procfs_open, .release = single_release, }; +#endif /* kernel 5.6 */ diff --git a/kernel/xpmem_mmu_notifier.c b/kernel/xpmem_mmu_notifier.c index d2ed601..a7e0495 100644 --- a/kernel/xpmem_mmu_notifier.c +++ b/kernel/xpmem_mmu_notifier.c @@ -16,6 +16,17 @@ #include #include #include +#include + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) +/* + * This is not being picked up + * + */ +extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, + unsigned long end, unsigned int stride_shift, + bool freed_tables); +#endif #include #include diff --git 
a/kernel/xpmem_pfn.c b/kernel/xpmem_pfn.c index d06961c..dc4396f 100644 --- a/kernel/xpmem_pfn.c +++ b/kernel/xpmem_pfn.c @@ -621,11 +621,21 @@ xpmem_unpin_procfs_open(struct inode *inode, struct file *file) return single_open(file, xpmem_unpin_procfs_show, PDE_DATA(inode)); } +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) +struct proc_ops xpmem_unpin_procfs_ops = { + .proc_lseek = seq_lseek, + .proc_read = seq_read, + .proc_write = xpmem_unpin_procfs_write, + .proc_open = xpmem_unpin_procfs_open, + .proc_release = single_release, +}; +#else struct file_operations xpmem_unpin_procfs_ops = { .owner = THIS_MODULE, .llseek = seq_lseek, - .read = seq_read, - .write = xpmem_unpin_procfs_write, + .read = seq_read, + .write = xpmem_unpin_procfs_write, .open = xpmem_unpin_procfs_open, .release = single_release, }; +#endif /* kernel 5.6 */ diff --git a/kernel/xpmem_private.h b/kernel/xpmem_private.h index 7db3cc4..1eda48a 100644 --- a/kernel/xpmem_private.h +++ b/kernel/xpmem_private.h @@ -296,7 +296,11 @@ extern int xpmem_fork_end(void); #define XPMEM_TGID_STRING_LEN 11 extern spinlock_t xpmem_unpin_procfs_lock; extern struct proc_dir_entry *xpmem_unpin_procfs_dir; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) +extern struct proc_ops xpmem_unpin_procfs_ops; +#else extern struct file_operations xpmem_unpin_procfs_ops; +#endif /* kernel 5.6 */ /* found in xpmem_main.c */ extern struct xpmem_partition *xpmem_my_part; @@ -339,7 +343,11 @@ extern int xpmem_seg_down_read(struct xpmem_thread_group *, struct xpmem_segment *, int, int); extern int xpmem_validate_access(struct xpmem_access_permit *, off_t, size_t, int, u64 *); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) +extern struct proc_ops xpmem_debug_printk_procfs_ops; +#else extern struct file_operations xpmem_debug_printk_procfs_ops; +#endif /* kernel 5.6 */ /* found in xpmem_mmu_notifier.c */ extern int xpmem_mmu_notifier_init(struct xpmem_thread_group *); extern void xpmem_mmu_notifier_unlink(struct 
xpmem_thread_group *);