commit - f07d448977e29aafd42d798057e30d071e3576bd
commit + a6dde246c6ce7883360973b2fdcc5006cc089de7
blob - c66764b58dec5a0c6334c21156c26685b5ea9c3b
blob + 18e282105889b1bb8c3ebd7cbc6c3aa314b695df
--- sys/dev/ic/psp.c
+++ sys/dev/ic/psp.c
-/* $OpenBSD: psp.c,v 1.16 2025/04/25 19:10:50 bluhm Exp $ */
+/* $OpenBSD: psp.c,v 1.17 2025/05/16 13:54:33 mpi Exp $ */
/*
* Copyright (c) 2023, 2024 Hans-Joerg Hoexer <hshoexer@genua.de>
end = start + size;
/* Wire mapping. */
- error = uvm_map_pageable(&p->p_vmspace->vm_map, start, end, FALSE, 0);
+ vm_map_lock(&p->p_vmspace->vm_map);
+ error = uvm_map_pageable(&p->p_vmspace->vm_map, start, end, FALSE);
+ vm_map_unlock(&p->p_vmspace->vm_map);
if (error)
goto out;
* Unwire again. Ignore new error. Error has either been set,
* or PSP command has already succeeded.
*/
- (void) uvm_map_pageable(&p->p_vmspace->vm_map, start, end, TRUE, 0);
+ vm_map_lock(&p->p_vmspace->vm_map);
+ (void) uvm_map_pageable(&p->p_vmspace->vm_map, start, end, TRUE);
+ vm_map_unlock(&p->p_vmspace->vm_map);
return (error);
}
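
With the lockflags argument gone, drivers that wire user memory now take and release the map lock around uvm_map_pageable() themselves. A condensed sketch of the wire/command/unwire pattern the psp.c hunk adopts (uaddr and ulen are hypothetical stand-ins for the ioctl's buffer arguments; the PSP command itself is elided):

    struct vm_map *map = &p->p_vmspace->vm_map;
    vaddr_t start, end;
    int error;

    start = trunc_page((vaddr_t)uaddr);
    end = round_page((vaddr_t)uaddr + ulen);

    /* Wire mapping; locking is now the caller's responsibility. */
    vm_map_lock(map);
    error = uvm_map_pageable(map, start, end, FALSE);
    vm_map_unlock(map);
    if (error)
            return (error);

    /* ... issue the PSP command against the wired range ... */

    /* Unwire again; any new error is deliberately ignored. */
    vm_map_lock(map);
    (void) uvm_map_pageable(map, start, end, TRUE);
    vm_map_unlock(map);
    return (error);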
blob - efe4281b26c4059a65e110507744736b503abcda
blob + 6b3a588c1f921ca4d9c34af606a0752cdbc394ec
--- sys/uvm/uvm_extern.h
+++ sys/uvm/uvm_extern.h
-/* $OpenBSD: uvm_extern.h,v 1.180 2024/11/19 06:18:26 jsg Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.181 2025/05/16 13:54:34 mpi Exp $ */
/* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */
/*
#define UVM_PLA_USERESERVE 0x0040 /* can allocate from kernel reserve */
/*
- * lockflags that control the locking behavior of various functions.
- */
-#define UVM_LK_ENTER 0x00000001 /* map locked on entry */
-#define UVM_LK_EXIT 0x00000002 /* leave map locked on exit */
-
-/*
* flags to uvm_page_physload.
*/
#define PHYSLOAD_DEVICE 0x01 /* don't add to the page queue */
struct uvm_object *, voff_t, vsize_t, unsigned int);
int uvm_mapanon(vm_map_t, vaddr_t *, vsize_t, vsize_t, unsigned int);
int uvm_map_pageable(vm_map_t, vaddr_t,
- vaddr_t, boolean_t, int);
+ vaddr_t, boolean_t);
int uvm_map_pageable_all(vm_map_t, int, vsize_t);
boolean_t uvm_map_checkprot(vm_map_t, vaddr_t,
vaddr_t, vm_prot_t);
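
Seen in isolation, the header hunk is the whole interface change; the remaining boolean still selects direction (FALSE wires the range, TRUE makes it pageable again), while the lock state is no longer negotiated per call:

    /* before: lock behavior selected by UVM_LK_ENTER/UVM_LK_EXIT */
    int uvm_map_pageable(vm_map_t, vaddr_t, vaddr_t, boolean_t, int);

    /* after: the caller must hold the map write-locked */
    int uvm_map_pageable(vm_map_t, vaddr_t, vaddr_t, boolean_t);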
blob - 79e482de8aadebe31783e09fd12bd8d7ef6c64f6
blob + fb323c94e0fe41e6df01331d37376e50232b0363
--- sys/uvm/uvm_glue.c
+++ sys/uvm/uvm_glue.c
-/* $OpenBSD: uvm_glue.c,v 1.88 2025/03/21 13:19:33 mpi Exp $ */
+/* $OpenBSD: uvm_glue.c,v 1.89 2025/05/16 13:54:34 mpi Exp $ */
/* $NetBSD: uvm_glue.c,v 1.44 2001/02/06 19:54:44 eeh Exp $ */
/*
{
struct vm_map *map = &p->p_vmspace->vm_map;
vaddr_t start, end;
+ int error;
start = trunc_page((vaddr_t)addr);
end = round_page((vaddr_t)addr + len);
if (end <= start)
return (EINVAL);
- return uvm_map_pageable(map, start, end, FALSE, 0);
+ vm_map_lock(map);
+ error = uvm_map_pageable(map, start, end, FALSE);
+ vm_map_unlock(map);
+ return error;
}
/*
end = round_page((vaddr_t)addr + len);
KASSERT(end > start);
- uvm_map_pageable(map, start, end, TRUE, 0);
+ vm_map_lock(map);
+ uvm_map_pageable(map, start, end, TRUE);
+ vm_map_unlock(map);
}
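
uvm_vslock() used to tail-call uvm_map_pageable(); since the unlock must now follow the call, it gains a local error variable instead. Both uvm_glue.c wrappers reduce to the same bracket (a reduced sketch, not the verbatim functions):

    /* uvm_vslock(): wire the range and propagate the result */
    vm_map_lock(map);
    error = uvm_map_pageable(map, start, end, FALSE);
    vm_map_unlock(map);
    return error;

    /* uvm_vsunlock(): unwire; the result is intentionally discarded */
    vm_map_lock(map);
    uvm_map_pageable(map, start, end, TRUE);
    vm_map_unlock(map);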
/*
blob - 6f2bf9f97b1c509cfd805c2734e41275fe8d6f17
blob + 61336199d82b7a223363fd83dbe04786eeaa93e2
--- sys/uvm/uvm_map.c
+++ sys/uvm/uvm_map.c
-/* $OpenBSD: uvm_map.c,v 1.341 2025/04/21 14:46:18 dv Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.342 2025/05/16 13:54:34 mpi Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
vaddr_t, vaddr_t);
int uvm_map_pageable_wire(struct vm_map*,
struct vm_map_entry*, struct vm_map_entry*,
- vaddr_t, vaddr_t, int);
+ vaddr_t, vaddr_t);
void uvm_map_setup_entries(struct vm_map*);
void uvm_map_setup_md(struct vm_map*);
void uvm_map_teardown(struct vm_map*);
/*
* Mark all entries from first until end (exclusive) as wired.
- *
- * Lockflags determines the lock state on return from this function.
- * Lock must be exclusive on entry.
*/
int
uvm_map_pageable_wire(struct vm_map *map, struct vm_map_entry *first,
- struct vm_map_entry *end, vaddr_t start_addr, vaddr_t end_addr,
- int lockflags)
+ struct vm_map_entry *end, vaddr_t start_addr, vaddr_t end_addr)
{
struct vm_map_entry *iter;
#ifdef DIAGNOSTIC
#endif
int error;
+ vm_map_assert_wrlock(map);
+
/*
* Wire pages in two passes:
*
vm_map_lock(map);
vm_map_unbusy(map);
- if (error) {
#ifdef DIAGNOSTIC
- if (timestamp_save != map->timestamp)
- panic("uvm_map_pageable_wire: stale map");
+ if (timestamp_save != map->timestamp)
+ panic("stale map");
#endif
+ if (error) {
/*
* first is no longer needed to restart loops.
* Use it as iterator to unmap successful mappings.
iter->wired_count--;
}
-
- if ((lockflags & UVM_LK_EXIT) == 0)
- vm_map_unlock(map);
- return error;
}
-
- if ((lockflags & UVM_LK_EXIT) == 0) {
- vm_map_unlock(map);
- } else {
-#ifdef DIAGNOSTIC
- if (timestamp_save != map->timestamp)
- panic("uvm_map_pageable_wire: stale map");
-#endif
- }
- return 0;
+ return error;
}
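
Within uvm_map_pageable_wire() the lockflags conditionals collapse into one invariant: the map is write-locked on entry and still locked on return, now asserted up front, and the DIAGNOSTIC timestamp check covers the success path as well as the failure path. A hypothetical reduction of the resulting flow (example_wire and the elided passes are illustrative, not upstream code):

    int
    example_wire(struct vm_map *map)
    {
    #ifdef DIAGNOSTIC
            u_int timestamp_save;
    #endif
            int error = 0;

            vm_map_assert_wrlock(map);      /* replaces the old "exclusive on entry" rule */

            /* Pass 1: mark entries in the range wired; the map stays locked. */
    #ifdef DIAGNOSTIC
            timestamp_save = map->timestamp;
    #endif
            vm_map_busy(map);
            vm_map_unlock(map);

            /* Pass 2: fault the pages in with the map busy; may set error. */

            vm_map_lock(map);
            vm_map_unbusy(map);
    #ifdef DIAGNOSTIC
            if (timestamp_save != map->timestamp)
                    panic("stale map");     /* now checked on success and failure alike */
    #endif
            if (error) {
                    /* Roll back wired_count on the entries wired so far. */
            }
            return error;                   /* the map is returned locked either way */
    }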
/*
* uvm_map_pageable: set pageability of a range in a map.
*
- * Flags:
- * UVM_LK_ENTER: map is already locked by caller
- * UVM_LK_EXIT: don't unlock map on exit
- *
* The full range must be in use (entries may not have fspace != 0).
* UVM_ET_HOLE counts as unmapped.
*/
int
uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
- boolean_t new_pageable, int lockflags)
+ boolean_t new_pageable)
{
struct vm_map_entry *first, *last, *tmp;
int error;
return EINVAL; /* why? see second XXX below */
KASSERT(map->flags & VM_MAP_PAGEABLE);
- if ((lockflags & UVM_LK_ENTER) == 0)
- vm_map_lock(map);
+ vm_map_assert_wrlock(map);
/*
* Find first entry.
error = 0;
out:
- if ((lockflags & UVM_LK_EXIT) == 0)
- vm_map_unlock(map);
return error;
} else {
/*
} else
tmp = last;
- return uvm_map_pageable_wire(map, first, tmp, start, end,
- lockflags);
+ return uvm_map_pageable_wire(map, first, tmp, start, end);
}
}
- * uvm_map_pageable_wire will release lock
+ * uvm_map_pageable_wire leaves the map locked
*/
return uvm_map_pageable_wire(map, RBT_MIN(uvm_map_addr, &map->addr),
- NULL, map->min_offset, map->max_offset, 0);
+ NULL, map->min_offset, map->max_offset);
}
/*
old_prot == PROT_NONE &&
new_prot != PROT_NONE) {
if (uvm_map_pageable(map, iter->start, iter->end,
- FALSE, UVM_LK_ENTER | UVM_LK_EXIT) != 0) {
+ FALSE) != 0) {
/*
* If locking the entry fails, remember the
* error if it's the first one. Note we
blob - 22982f032dddb643dc9def4cd5a4baf20cd83b4f
blob + 252f53f9295a6ed05c8d156e743a404d70ee0cdb
--- sys/uvm/uvm_mmap.c
+++ sys/uvm/uvm_mmap.c
-/* $OpenBSD: uvm_mmap.c,v 1.198 2025/04/06 20:20:11 kettenis Exp $ */
+/* $OpenBSD: uvm_mmap.c,v 1.199 2025/05/16 13:54:34 mpi Exp $ */
/* $NetBSD: uvm_mmap.c,v 1.49 2001/02/18 21:19:08 chs Exp $ */
/*
syscallarg(const void *) addr;
syscallarg(size_t) len;
} */ *uap = v;
+ vm_map_t map = &p->p_vmspace->vm_map;
vaddr_t addr;
vsize_t size, pageoff;
int error;
return EAGAIN;
#ifdef pmap_wired_count
- if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
+ if (size + ptoa(pmap_wired_count(vm_map_pmap(map))) >
lim_cur(RLIMIT_MEMLOCK))
return EAGAIN;
#else
return error;
#endif
- error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, FALSE,
- 0);
+ vm_map_lock(map);
+ error = uvm_map_pageable(map, addr, addr+size, FALSE);
+ vm_map_unlock(map);
return error == 0 ? 0 : ENOMEM;
}
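
sys_mlock() is unchanged in substance: the request is still charged against RLIMIT_MEMLOCK before any wiring, and a wiring failure is still reported as ENOMEM per mlock(2); the patch only hoists the map pointer into a local and adds the explicit lock. The syscall tail in outline (declarations and the non-pmap_wired_count branch omitted):

    /* Refuse if the request would exceed the locked-memory limit. */
    if (size + ptoa(pmap_wired_count(vm_map_pmap(map))) >
        lim_cur(RLIMIT_MEMLOCK))
            return EAGAIN;

    vm_map_lock(map);
    error = uvm_map_pageable(map, addr, addr + size, FALSE);
    vm_map_unlock(map);
    return error == 0 ? 0 : ENOMEM;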
syscallarg(const void *) addr;
syscallarg(size_t) len;
} */ *uap = v;
+ vm_map_t map = &p->p_vmspace->vm_map;
vaddr_t addr;
vsize_t size, pageoff;
int error;
return error;
#endif
- error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, TRUE,
- 0);
+ vm_map_lock(map);
+ error = uvm_map_pageable(map, addr, addr+size, TRUE);
+ vm_map_unlock(map);
return error == 0 ? 0 : ENOMEM;
}
KERNEL_UNLOCK();
return error;
}
- /*
- * uvm_map_pageable() always returns the map
- * unlocked.
- */
- error = uvm_map_pageable(map, *addr, *addr + size,
- FALSE, UVM_LK_ENTER);
+
+ error = uvm_map_pageable(map, *addr, *addr + size, FALSE);
+ vm_map_unlock(map);
if (error != 0) {
/* unmap the region! */
uvm_unmap(map, *addr, *addr + size);
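
The final hunk, in uvm_mmap.c's wire-on-mmap path, is the one call site shown that already holds the map lock when wiring; the old code signalled that with UVM_LK_ENTER and relied on uvm_map_pageable() returning unlocked (hence the deleted comment), while the new code drops the lock itself before backing out the mapping. In outline (surrounding checks elided; the map was locked earlier in this path):

    error = uvm_map_pageable(map, *addr, *addr + size, FALSE);
    vm_map_unlock(map);     /* formerly implied by passing UVM_LK_ENTER alone */
    if (error != 0) {
            /* unmap the region! */
            uvm_unmap(map, *addr, *addr + size);
    }

This removes the last asymmetric caller: after the patch, no caller's lock is silently consumed by the callee, and every vm_map_lock() in these paths is paired with a visible vm_map_unlock().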